[Binary artifact: POSIX tar archive of the Zuul CI output directory var/home/core/zuul-output/, containing logs/kubelet.log.gz (a gzip-compressed kubelet log). The compressed payload is binary data and is not recoverable as text.]
/*4kX=j|q`j[q+mvhm~h}5QaW.{4J݂]8m/e+rK 23ϙ P%15ߴetջNVt:Q\lRm級L:9Zָ  OR.Mղ@0}OU]N/ b^_ \J0ќ} }~T㇞p-pG(ڟ[VMм֋\"գKu:.Qs$Uk!Ѣ5TU5ѫjyP1o| 㸨jR+,PG~NgGCCTQ6OjA2QЌ,S9ys͟u ,A➥q:%+LHBZ+ J, Y0̵iE !2k&~RFOHb #@b_cWrG2BëmO%D$n\ Ia)B[se`NI ۼ?{tDvϿ 2qܭ]w.v5;[gS/F ;:?֊؉ognz5?!UH+e=O,ۃ!'qݮ#um%5 JpE!oQ,jPU t/D x~:~$ps$Y0u$OޡmQH/O5rFhؘ$SAb"OJ|{ lCeʩJpե8Wrmoj>ۆ}nL{!|9M8֌yG(KDDkxDsAA%^l*\<0N܁5E<3ֶ+gsD/fS6 #Ϭhז^aa[7b PDCuuqa{)0C4S0TRwwkf ˬn?\.ȗZYP1NV`XEb}9uW27ۻ@KmdUrϭ'ȢߗuwC zVpkD1ٙL[qpH/[Jrܣ ¾O@n KA\>"(ٻ Mo6 B8% 9qq (ys*.$ܥ$`8`DXCOq]Ιheen 8GngVV̫-CmgPŞ3#pr0R=H]l,5.WMdY9eUͦ`L߫rRʉGbףI*6OHJSŴoZ#\S$%LE XÍ~ ˁ-nV<Z,#\!afiy `\(DdM*eBoSm8"x8] [ @H R7z: 9լ_S`J"( 6(c,+R=q@:5Co–nz]ֵrՆ_,ʗ (D*2}{QIe^Ua"Q9Hx죅r-20$x*fp3=>@oi^$,?ݘ̣T-G2=iOcIB@*Ja=2-'l خ*F' >/jTR^DFDƕhM^,B^Jݡ 6s&r7yFtReA^ӡЖ*ʧ xqĵ'A hѝnL*g7laZZ2#qmRA0iTr1ƙĜ |\) 1YRÕhX5Fn,uPeXˌw c=4h~;{+L FN0+KZ EZ s3N |+{x/m!ƣEuuˢW;̭0Kӏsj as+RFV\j>{fPG 6 YA R_p>O 'BH!QF>-lE]߽vzkI]?|N9gl"ddNy ]-0^x0mH*㊸lj9|^?¶TDNL{e^LRڳV?n2'#w?\{#׊o[`EͶ s0w2wYCkSF2[^<;ca¬WWƶڥ^?X;hnY䃞UH9KLq0ܥdQ,1A'm?]+"7^m{u{nG)z S|B0w)Ҭ]JW:081JPL]EO[ih納msSQIM5NyRWħOfQwIhdƮZKQ٣VN[`|_O:}?; JLO6 or_op#HDn:MP/WrquuffJba \9!J1Bzi6Gik5o]#>YׅZߩԼn)0qk`jbX?M=0ԐNt;x9Fcoڱ5Ӧf CU%w`^in  _F>$xa!}q΋"豑ffUJ`XUFah:sU}H KϢ=mV0WQGEw|ѩ 1ݳ"v49 V줜ڵ+ȄNdt^M  {zGѳBBH.yӻq*[9Wql1Z!b-+-saU>uK") :UcQíكÍg<u3-XBnlc)$lÒJ-ψ^μU4Ӑލޙx4O1ΆmU8FۦY D-i˔v RZRKPWaRɤne.WVjegF? x:d`0ĭObU#5auڥoa2/`r;MNv8_~|~78ϝh8Rsr!9g|boh0;գ 4k|8O o?Mc=J|?`AC3(VIT> G,sN U@t*\3l haB4* W18ʂ6}U$ D,ǭ x߸~2o\67-b=%MuXD%?$71E8vk"k”^E z[^ͯ <'p,$tP b?gy0 7>_@8e8 C1z\ a!׽fTS—90nuG +L+oL5MS9BjO;eȀ, ^Y2 : ɍM:F3i/:5HS|R}O/Ier0YÜ,@,R"'F눓cQZ -B:HKaνFX>w>:PG^^|zOV/'M&`(D)׎KUT2x,'$%*YbvҷR@E] xX֘$4 ήKKw WOؗ .[!V㺥G#g⣂h*YC!,7y\-.bh 9TF1oFj.se5̵fZ T2FtdNq&P) L#XkU F/gä:by|+bBr]+ysDySᔰ:S ȎǷ_Y %%Dپ2k"xJk`A5RuxlQ56fƯCD5r@\RYJg[c%]6v-4&?I~x?^KX\Nz b~;9/k[ ??KWxy}-?~:hGsYvr%' 1'?ٹO,E!5;͢ k bO)hm`==g 5=K3EX.F3L4((8\p j(x ms)1< %9!=7;7S^%J(*l]'6bߦ轢ؗSKĞ#oPz"]ڟu]L̫h#& r3*06蝌V$\q/} $"1 wG\uiiCc 8H.f)` :`wČ'V1OIN2Nͻ7%+@6'HZk J^ O`o20kjCPN'j eA4FT_-;ZWN6öZY <ꩀu,тu>2I|F jIҪ2 YZX#+Q.llm٠g`kiNyba݅SD 8J3-yNoiTɷ"Yi4ppk~J~Xho?nK DOÐO3:t _?Z~[ԃ6[d+IrKp8[n¼g4"Cш8])?WD3cUq 0.se.l`Q*zDjg7p4Ji0! 59 ҏ^Q{W㸱0/IpQӵ/'@ b;!nukkIL_SZM-Hi$=Q!sZeE⇟iPkq_5174 J%QeycjL-g뽿&dShNss A@tGwqV6iiLps4Gv?H@PGK8BB]rjCFvE"-qsC47a4}oKM! 2 z:͸<'%dQg2l:, mE%eޘ~&!=cP ŝ?/93i‹ ~_:ɜ۶)d_&_]8̪ڿtӬ Qg߫Jg`wYgv/.9eX5\1bU9ZΤԡ%$r2܀&&UL9u\(Ճȍُce BfzS,.+'i2u2r̮/Esԗ Db@6ӜhiD;gy Efl>UFX׷y. .! {+%&Іzڞ~ sXEzt?y"z %LخM93&gfOpcjT>7qdT nH Љ+ֹDNH?X6~吏\l_iOmT`#hS2͉=lN#)]h;1!Bi峌NESs ~G Cb+n+nv5\v4B2֔`l3e: xM0)F qD$H%.\'U6]q1z.'" %4.f hnк)4li6WeӫR}mہpy*u!n#HxiI18OYmg_(m 7^Y^oc ]>T5py sR2e=E6a,qs&K8Rz$cjŠn 0X)k?E 6?>UMF;RAkQ Ovfovҩii` KpÎi\$KwΚ5Nwp;K m'~̊uoL7 ۋ柣%7JiHhI #P˺sO`XRȬ Sيu}w=+fjV=ۇ 51@cR3%`aOXfs'4ޑ9!3t1V?V)&T /"DC8phqp}ܦ $A\iƄWbU_DasQ2 F'Ec:實ben'j 6ٖk2~.R0$gs$`gKfRǚK L0Wv&\q$2s-Di~%(S w  ʄ u S LJo/(wqp*vNJ{bhWMWz~DIX@ 43Նx vÂl!L vvcLl]a[Pؗ(f(1V#.ޱ II`E /.zQ;@IvNmꦈLKKj.+3jQC`%@VXY36 17%.Tlb%`3y $dwI&f5oCm" +RH)NԄ\%<*qs$,? *^*Q_? һ|eVS㳵\6e:;Lm) h./qM>vxm hFprF}Də :&l j/-1 '&DՌgԞ6\IAI?|Cw8;7 `ZMOgk,&局Ť@r bӧ"R\ T#+%ZljLhǃ% XZ ckt M]vplXv;Z4 Ip0*#o3QaこQLP5`⌥ˈͲwF{dB( Ze;iWCɚk{i0h#%@pA4qÉ2Q5x׿ɤ ۍD<Á@{j7YOw%Os;*kEиwWm7Ur\F[\B" &+O&ԝŚިڒPk'[=N`SaQbEgXe;f^$ D\Eȓ H2qdW n.BA_"~׻ *~p7 .#DzN7~2 ?G ݽ26:LUd% Ҕa4FJ!=> 1 UheZ4Kz1oCĬ7 Wb~d,Yb\l5gțL NDc VV/Z ԠTQL tqWWd;־6XQiQ fuH[&;a]<䘪\( u61.u/ڳ˳Y(5Fe$scs&iQ9[C_qMjѳ8yQLX+ʣZf7BItrV8ҜPweQެ?_&k ųZTvR*'d+/QW@=m7qbm)CeuLF%u'\ia&ie5- :/ut0kGܙ0{`NiZQO\SI9?mԻ%Ē h\f0YaNCN u:?Hm牴x:}iyouѧ杶fu'j-_eHXŅ&+2BӫH4D3@;<&ձ/u Ӆ: jgW --qe*"2sgS:;Ճ6Vo IWAKjqJT>FPV0^. ecqLqo7q-h? 
]>cIApt]~Ub7OQ@͉0l -~v& 3.tl:[:/9:I-m{v{ ujL 8wn6U31jQe"j1}x$daGo,AI:kZ fILV**Vd5E~א cGJUaa^/(3Q \^oԹDe\t ~=)D1?}{}xǜ$$c Z#CLpaJMFx8áwem$IӘM;CzA؍yX4<-ZD;7;^}՝[qmj=[vOauP+o(s}M+ b]}kuiVa>QMC 4hlNuUT]85/oh!4|GICkҠ,&ivtLDĔνˢ* b&L ,h؎8O" *wšcP D󡆶!+$C^'{G/15oOUP4aW*})y[yCX{L~nK ϒ#eqLo`b1DZ_iخ(5.e9&v1O.N9i`2a"Uٔx(MΟV:^qñMd U׊PB@}|~ ɲ혧{E>Kn0ֻJkIUfb{0ׁo4Dāno'U_,y%T) >4w]ز& !]wC%K*/ƽѩ-\wlv¥"kh/ 2dg)#Ӆ4*ʲڲ^ZJE v|>{ evCZ 5Ց-J U} CM^Qܖ5^baE֓Ԡop~P=&-,^G{lQ%.Q˿pLemFNrC_z\_g㱆˾d;}fk"t[L W2(OQ1M_Z50<ń̹:uΈI8 I%lYcL;OtYO|m [ A[)$i_hDOȝN竜lhDZұ /?DE ؟wn_>  [x[mتǪa&͆U7vح.Ǯ:Iz GV!g';aZl };;%Lv~28vc6l+]3edۥ)9]lcr2QRGt=sY!?Iu#YuvÛ!ֵ>.PJ2N d̓FrV9h&^.f}f>T=1a#&23ث1"~[{'™]̆b:≮*ZC[DZ¬|Ctc#yC)}>"˺fEP=i'0;aꛕώt0Y>)\ %m)2ڵ8T#\j~|%Z );l9ykI%rNf0je=R˕!-74?:uZ&olh4Sh&֜ ݃KfMWDGdl(TE| `u0 Y7 `.wk\pE[4+e SC@ʤ72iZK:h!+ /;KZ/4yj g&dhT(9"wmY^%Lh|1z,뇫|#Ҿ9w~<'#1Z 11" )1é@[ָ\} +*Yk#,˵NvEݺNx݇U ; nv$~~loiUˌe}yesV5nYha''9n{P>@R 'C0\_h}.??|IK9H)1N'@D7WZKѻY 'k* -`$ ?Ьo3򤓔Y 7bbKܝ|ˁes .G4S^ (sΎ}!%P&{-WԊJ\d2>e|aݓaD./ZBcny00=TKj&OJfz6|~1,߲KDc{LVyz2I>O4r +Jزs"h1/7;7`esnY w6af,v#Rz(8Kls6NT79uko0[qūZgd0$~8J'|(*|ڲ>A50D}^^EލV}3DC'y%5>/R֢l-kEkI!7SqCӃKFUZwdM%VtpCnͬ"!hz־G_oI5p9bh#W+%ˌiY{ѧ+H{ M~7+DN&ژ FRc)i?e'ڈ!9 {>G1W؃W B0NEӔxP=a?kb`bիՖh߫A @L_.T|vqR]fj1|9l]7[?@RHc| Z.1~>!L‚lD,rL*-MbL,ysT TWO~)񟇗J%XmG]z_tQq hr58U/\EFQ5 AfF`/jvU+Sϴ8%[x>MxzN 1L hqH 06lIs-_ Q՜<(zXňd2_D*zjq T?{^8\)ֹp|fğh\ObX$yKMb@=29.%`LzҒ:'mY%$VsT"$ bzF00Y:9/ XJ((^@9%1"v H",7:@32Z0ӆ;4O盃y&e̟yspPYD*+\k| TZ A 2CePYYi֡on>;7,AftZ%b|Ri?/"4f C/-)Lc[bK R–'0cyN&)qLS)`r1JqŜ@u@O&|> RKnoY6n2;saB9M&3@ -ړJQQtRS5OȄpLXDkϼ,Z&Jv?[}6%߃lŗϪss; ɐ:~O (IMwwu=P(CGP%ZmQ-Ū0Gv\{ô"wp7RD2+#Hͤ <)c]%T*-sU0XIA%GfTjGvEՐ~ ">]IΡE`6bo6F+%iT7rƉ+N .憞;!8m ,wEuĎS }F$7@aHEƟmHj?0s(خ;Hg4,]`@!2EzAԎB*ٗumyv0xԎԲkX AAMI b*0",E F@aaI0'IvhٱJ$ vyQG4tǼiBdlEWhzТ茎عʰ)9ے' *852hQgB' Sx~jXt#;β'au|XZKM:- !td7iʙ;I&\΍Nh+s,@:ekd1[lAwb7 Ċd1N)ڨdb*!CSkbSᕨInq:KQ: x}sP1n /t_=jvZo`pHdq5G+jmVb$y>,Т>-C0q8M.ߡ,۲tJ:l&(p8g7C|8Gyx ě{_^,27J^}ڜӜكi9$Y|-yFQr+ A8Q&mK΋4c)%QG yך[DzvL}x|rab}9'g*baJн W+Z >CK[AjUу~a 7t ~u6‚C~5*zu$}yu~ڧwz8sK5.Į3_/0g/nsD\y $0R$X'7csғZy-vvv; 9!(ag@9щApgX=+ #xl'hm!=U3%KqvovaX)IC*1 f5;hDRsHe]a*֎CԆR׋`o@`GӠM1'JBȌ 5h7& wӏ`K} #ao;qVH,m֝@ J1K)Ձ&-Rm"UbNaLJQmgM˺B mTpkXXzYΤ݆cPG76)2!EgC"@%8K!`mu+9ȆI4{ww+>ہlr&?l`~@%-s1ِrg{[yA=EYLc]/\~;"I@e)Ha_[^N^v|ډ:0JW|v>CƁʏLqHz.I [=W j-{AAIH! 
Eq%CYהlXd sGpJD~ϾNQUt%/+8W {Uxj' U-WrI^gRUP)B WY- `.gFWq3# |.-ۧ5͕%T[I҉+J͇tq YJYgdTWtdS{ a ~Z!ŦL@?Mu r{H?}y n@-KZJCǷa-(T-QUk!%j](hdpb/B-D<*߼QP_oCV_ydLbiʅU <AABDiS`nB51J;f%4Wݲ[sz#)PՏ,HQ)̀~+hWT\ҙj?߾sn#<%[!zbG UCZs&~7֩NTգW#";V݇ Ut\@&etץ2u:X0&ӯe'bֳՈGrZuof-]qlNʝLa9g@|O-5TQ[xbY{j172hן#|trr0oհ/:ś8oN;M֝}9I8L~5 vz>=j/wg ggډSXJ }q,Sr녔UJa-]x.)0z-o?},g}ȃECc{DdkXoFzLġۡ7*UJǕ#O>͡ mܡI94TJG>Y,͡Ej"Tt;Bv7)U*zE̱P7hנݸ|{h|pҐHW̖ <ҚH4hw#o3[,*(!jŷ'{?Y\x$mCu8oF7P,~k: 6SN@gK&Cz,P '\x4'ws$ɘpk>ȜjG[; C1:=ҌΛG*ŏzF|3:9"jgt{]t-G݁LK|zM+eX[Xr 5e / w~n-,Pu:Xp54`D-,α"`vV{|5+Vfĵ4_-OYqwz6?զb=ze:xt)È3V1w`yWTR vvW6q`v`U0*l` ;ѮVz ^*VgF BOTXKռTc xw{)z) .޻.>!lTw96G#H,ecYBA֢'1yOV^LLm $A ,a %DjMH.KpY]w)""3.=GjG\*`1Ż{>nՕpTV~x5IU W𷭑tMgQͲ ,U9scyz:GxڬϷvS"(ys8"cˠݙaA{CTMڲ>6ᒫ֐)ɺ) >x" u=7wF(ǐC"Ћ,>dKOm>01K&ކ =;c2t(c0] vVr6!yI^BIb]HEB@F*n0šuE2/x*O*D%TOB̲ߧ)[;9~Jw H*䫏f6SvSś,ǫVt~O5>R,ݛ?~*Dd'^)*}Mܧ.*v}D4#vD{6H&Dӓx З̊!(=^V,i^55zy.C(),^ɇ >^'ݩ;uB@3[x5#:(9Km9qo, (qX.xL C]_GƆhAF~DD '`{PȦ ƋKLOmr@Ř:/mb"Kk]KRYҋ돆ZbS .Nܫca:2@.Iwu3([%xQ׾EڑyE\C`Ƥ3 {vv.~>U!ot=2zER~п>U(!4 }0r{I Vyr,!rcgHCߧ!cp^WlW^{qqL(^LY糃`" &؇е5UtUl}ʝ v:爢HÏMy{qq~.ώFK,$d>yҫ tzuDE1ZpׯG2Y7}F܃6`p9r 65ף3`/:tNzIan lIBreHc.a~sZJelFj:[5&G,*mBu6t( tJscֶƖS4 [N}WVp6z;kxƫT/ҥ>]=5 .cA U5}5"&uMw!-{nbcb;jt'*KCQ=q,4W FytK*ps±-t9TR C84ܤMH_i\.j\SVmkOk1dyQMtfؐviXʿ*Q(R[6# >jt5j=GP~+@GyEkIe'V_9(Da_oI?{bsQbI%/roސKW|#!oL ?,Ok ĸЁwءpp{4Vmu 8n,Mf hjJ=nEu `T*j ;۽PErrxtQR+nhKIkz6G8ynFj55<4GV_cRUo Ɓ~`,.72L\Jߘ4n^g}xMŋ.+}nF<vkaxFCWVuȰG/39p&Y>wd*PCGFqVH`[.9g )e zUzWʡ¥S/jhٻ-aB,h)>o|I'ۇt'͓#l o8r!"g+sjU $VdeEc+lcp hs/;Bv}i.)mq(`%q ƹ JBUX˱}`U$[8jvEؚD[ F:Jo0V>u^ l.JbPQ]}l6B 6P\{>`tDnfl`K1!S=a|+$ z|W^5}_L?ķ؏F#JQ9b}&7~o^3Uhg[m@6W@$va 8fPoflAĞKndK6[Oᏻ7<9}e>[L-Uuqml)=X]ECLVgS6F*1qJ/@ %MX"$!jiv,bXťtpXeh #DcQL JU~*{& #FTw".j{qNOR GySjM#FM"0ü5oj=`1ϞhtK'#A>`uQl$Ag!K)] &X܈#P 2@u=yusI$em(_Es\`8fe{^yؽUލ3NVlZ(} XeKVZ ުcW8Kб*,Jզ\M$$}1=Knf@bV9{=}{訩x>w9` h_w4ӥ.ti;]nJJ J=zϚ۹QoFz3zԛѣ,FSO"bESd1ap4_6NrTyަYHET*vG{FΨ&;o4[|81vє^rDx^AAe&5f \ڍo̕ۏkj:s{6H>L4Bsk"FNE1P{e(s!GtPGұ(n< ~rn7SSߍO.FSwѿ>?,r~ѻ'\0 y\]V[3'S|}v+|;g _fQijk?zNIgfSm〹EAخFjNcHAWc‘0ȬX"cL|>%>į#4~U]g_ݛtFXl]p%ZuztD3"VTd5MΖj4:]1&KSw該ߪ42B(pDzJ#(uJsQc֧ͥp }ͬKEjHYY AŦ}@7ګi؋ϡLZcۡdۥڣ[۵H&#{&L׺LqF7"M~^VJ$&K쑲B4mRϰ'$ 9t8xKm0%\m\hy + Z%S4?^Ց4kt +B\uS2v0rfSuG1fW'T:,RFa߫X\,J[\wQj֭(M*ޜF CaT.ҸH#.T9YaP\Ƙ%[b^G'Y8JBwiJo:*[$]&t비EU-n|*UuG&\5xsQ\SZhl5QLWj<˾j8_`|}seV\{M9Nd%Qӫ 7+v VXQav*UO![ AŊ#Ď.mui%7+YlV81*F̨ +!jһ1(veHjP`ydh?`a9?{{t#M@ |Ovb};Baqrv?g{Z&l~Б-EWaoрE'M {?nB[ʽ<>UĸնĦ }T~ow+ N:hsYdv+ tDVyr!/ηW6/;CNGdWhPhg/A߮޾]xuu9.u Ϲ,z?LI|;(ν(G&6 Tz.Ꙓ3 Qn\=uҶBػ6n$nvlGCUպlklEɱ~!Eȡ5 iZ @oB"pJ/!*RޓOh$l,8KjdFkb%EQ{󵴢sXЇpUxYTL wk 6 hsT smK2?qD 02oP)b.a9miи%$WMѮ'7u_~.FY/`D\ H8[ ^#cm0eLPY$)(`ITS oI2~%ύ4 &J'z 98 TXţN-e999])1: `DrDWѵ,8Ku,(Vn-B.ՠY+ +)J\ n3JXd!r`Δ10<,,"pƔp\y Ɉ %{.ۍT۫uYL~V ABIao)ko|3Oo(L=ܢ+FlATaϺ;wcH!"F2rρI՚#̶ H& SRF2 "d\f@Ckwd 8+O gtňGhE<[jh&Iqܺ ϴ\NTv%ח?xO"uL䞅omb!6^CPo m==Sܣ&-Xt?xI&U(ś CzGM]-F;BBB;eIQz;䦖0}raRЊi2H][r !TnVhC&dj%HS\`ŸRss`:?^\x'7g7,t8-r?尐cçňój/--=2n6[YW܉=Tͣi| Q+j=qtWut# :8sM/oCz6&HPu lM/~;WW Vh%\ݾ t~!t&_7)KOeL-S,k6kԅ/S9L?>O:32_'an:={sqN;V14Ouހ YZq-OIݎʚL5'ܮ,۶!։XmňGkcڒ6ft25*6<.J05j.彼Pw?NbFYR5J~Ignߊ}$JB%e%\`)Y!2SίqlF<]QIX!;؏ہ5~Yf:e4ivWhB꾽2Q^@wf݀Gb fS 7 5R,^ܾIoNnpIBC<ĶAˮĠw?{KT1-gPeUy?̑f_s367eUT%M(ֵ, em#߸Uo6`@zfʀdQ3Bw=eY)cjӫp-a%ڡWpFF}KQC+&h@Ewt Jk5oKNa8 ;%76BP@p ;aAIdӗ_;/j_9(Ws.P9;!._2gG妅T>ψ> PkD^W*W6KRNu%2uSn'7R7RM2u>ZʅTkFgNOF?cwuN#ۛgsRWm#g3\\xsE,\5T)iӎM駋E+ZSe.ߵMn>geZq~ޡ-g|6o]fYfM,8-E]X="f27jy=4WГfgٙvq#ʸirk{5DnxhK"-rq4eu5jGX"3;jM돠~mxUտW7^V!*ɥ0j"P pY)콰0e6O\jߊI ű1iZdUڷh* 5ڪ,Yme`Ȭuesy}UfoQ._y]bൎ4lYw+Ej拉B^'^KEN׈`ܳVj]hCI=959]A=?_􇿜Z],zxw.\M31[kUIdz=cXZSpSB!z:9_7&yT.pƠ"҃EUT 
`N=cvg_HQ%=V.ÿ̽9ډV3}sܷP{4ї_x2zyG{H7tdӧ=$PڂB ,wraPXr h;2Eh6@T0@r gf|y77M>IZxtM.,h Zi.Շ 1 <,'CQ"J& mԛdHOjD.2WRkLtHv 5: AD/:y#VjTcA=! t)%|F? z[);]V^kBzž;[ S5oav4w(,n0RI1B{V IPD[*"d/"#!=)yR4ND($-i^®zZR>R7R-e]M. ?IK EH jTwm)fgb֓7&%M#חe¬"0#0ٙi7􅘦~8S#2\_; PJ]߱TN%i}Qbfc>^n-;tꪳ0=cFf=o|vSy!ja}"MgF]Z(?L5Xz /W. R>ZEvU:ǽCuAOVY[H =12Z*AjM:a]Ė'KU؃^هtx<~Ho!T[2-Llb޵#m> 03=yܗEf&L&[ܖZvK-q78VIVzj(H+-.0 ]r$?95hHj ȼ5IEB}䴒^IOI͚!TGB^WTUemH0DN19.gv@Pd%±#d!(\: ][h7ԻH' 䍤w i= ÔO:Q{CF!K:v\-FPHSj ,fg|j~KWjsș5^{f}ȑ`luEM)Fo!ȆdԱ)U!jpR(OzHG͍~S-+VD+~Hfa5 ȝ bbHMʱ{b2>4*UC"'*֩T.9WǤC j#$0.x010[~ 46UN/C$S74VhL 08mogPvdHzi}q/[<9Z$c7s-[9K"vR$Pg>b2P{ &K[}j&Iܲ(mX1S|ʈVW-S@+oM&'Z[%EAItC27O:Z#_WөB i8VPFy4 #^lNfO?>[F<+"A3ғIδ2J k tSk@MTk 4.]Չt錬ŀm d9LƗ- }bSyuuP¢)گ2UN(IF1jZhVduے}|h>h)~+Pڑ}y `cH57-8y: ͶF=kJ db67R_bk<]l gkj,fK:;rM-9ZւrOw\op>dO jpvC#ODAnv,fs`.(p~kѱ㝈Ԁ`TKW- Vi-PŪ쳷C F(%_Ce+FyVъ]$KWŃS<8U#S@WΖD5Dnb>#3Jg(Ю@5Wt}ЎhaGpv==I aGv=5h_N5[4 EæImfæPP~ˏ'n.ΟΗt ٿ˾Yn\rtFR_1~36*dkQMۓɧ__8>-f3h ߾!Ȃ}L,?8hN!t"VX uW:Tgge\Ta0F8|mvdz9Ń'hzO~Gb|W؃yZ=>o0~%1#,(*B00.q~ˉBvӫ#0%X3:@#%Q GTMMv;.15*;jJKi +ЮSaN (mC;b#X{LNъTh:uƺ {D/u[dQpQֺNt_]踒Jn (n\Z# ^="= &=bf6T#V,j]ܕz|xDvvUCs]C5g%R]K ٕuu7evvo)+D@;:1vKQ}BrhЎhP! ,5 ֜qr.h׭48#6qHRgym.Ю@ͪOPvivg4\Xv@;a&' +Ю[Rn&PLeajIY!v{+\0ej&@cH{B) d <P \5JEw"n/hu=so[Wڙ} fWkrWZk$/_68'nb:M}.v]X?sWp/FcP$~6]\]>}kQAV1{65tK]<|S<'_YGҁ:Y'>ңkip>i,5n 8FѰ>ph؉vFYŶX(S)M 5a@beS.9+C@̥y-"xQ#W>x^Bjzi~^$rgE솮ykjkOCH Q#!op~嶱L7y|+۫A׷L{K?Kݷ9&6!sR\TTT.9~L9x;=e?[hZڍܵc8iptry2~F+tޞtȬs|΂? ӛAF?:f2#Ih<'LH:yvh1esgeH# W,a;Jk68']Ov|WW1tT1㲾@XOV \c3 d~{"uH6geBՆ`(ze㣫! \Z뇠t"&Y qHx $NyP%G*IDATE*8{"@!<# /㇥=x)3j:i|#ȟ\L=:fFl,܌'d/p('Md%2o1O.֐jN西e,9<[OMޝLAAOIVȚAؤuyMOTNVf?6+NpV>~Z._2<䧻-ot24;gDdAgLw@/<<%?㓛p}P㑺cuC<逾eCO mӽ(^#:ǯƁrYsP;"K?oǴ[ j8-qzSZ m*)I̭{zq~K@ѡeu)tzґ*G[]Caд]M{ܴuv(8Н?s_X(C6l/G3yE+>SL9 {'ن1xkϰ gAX~R.u?V޽y(KWӾw|lݞb~L\ :cQ%qaQUvg4)Jhn㈀f 9pp39y ,goȽxnwMi\?w/N炦Pj{&>GwfvyMCq7IFmn>sO;c?ERLDi8'6CJ_6uYVӓ?k߹c;7qJhǓ˸`Zp #Ht#^Vg\d#䣍Sפ** ZnҎ21 -[{ETaE}FxE?E7^Eo[||nd۳H-d-o\Z^Φl"$d-ܙRs)*6miHti>O k@ܿq`:+o}*!ĔSZgʿ.Npx i>y|W~7y=<8FYi[v|O`%ܼ_Mbi* tgDNW씜nezPٞZ^lvz~P G%_h\ʎլ;+4R0};Я.J)JR(U]d\GRj)Yo<dWqEB]omϸZ(ѳt,id-w1OyWj^l2P%-8ML߮5mmoU|ZOl֟?vCw N]fΪO9XJ32F2U~;w6Y|x& KTf++E~?~?~o|;._y >!7iNJ%hc,x 'p%`ZR (yo(io8v3%"w/L"v^yKqna־tE"v^-%6.[͏>8܂stqIK <ԇ \Бէ˟5ZC{u9@mH8uс@ŷ]V}Izxqa=nH,ڧ{W%-*Wu9D2 U+ҸZ,3N*}|Ep~MJ/Fm[aӯP]/xlu ΟP}I+^,0ƛd5v-wW! Ad̐N' }4z fZ8B] ۛ[ 9v ~ڼP7vUP%pH6dt=Wgͫoׁ|9<rF?]0KTj ʙĭ>t^60;.Ġ?r& J$\U8QuEgP+N)B >8/?[3euzz8qD@Xa` gyȩEmD1n~ER.<62\(L.zr&(e=h0-MD TcO{>CFkֲGmt(&)P٨~V64pUeOw=.CCO][IƏY_1AicJJQTOi}ScGh֚;8F,40h%LZ[yd2_AǗoZ#d}Fue7 TSjX*A`5F3DRM1C=JK@KJ~kzL yugr޿T0H8/s 7<]sPx+χt7= &e(;@Ԩ>{$C֟>!rOHgl:&$j坾RK>nP'9-e*_';$ҴC( BHJtZCӾI;(̠Y B)?CCGN׷}ڥ/k+^"Gޒ1f׺9oq \ ekR$G:\$$=Zw,, V߷/0i۹%jF!FޙǯyiJ5}҃:HVW{uBz9wo')|XYMS&eocԂx%ђp46Z-9~]/BΏ@͸Rl*i, _ىA9 8Q+^Eq٭3MΝ$bύ\:ML OFWvII zm}Hk' 39Apyi` yW笲E4* ?$GP n-<1Qd.=j.]\ \P7OsE[.n5 8f[[]|axGb*7K4aork˴v<:_j撛%7c^^.?'7eS]~'=j}9Z回@bj#}8тjmq֛>l\8cɏ-+! wH~'_=a︲[Ϯ ޓD^0lU[uNesLFJvfFbYq2l$g;65RS#EJ/)=3[hH&\br) &Ffk Wm͋.,ΑU5ルuNj$|<\]N㵺ڜVsNjKfsڕx tHEDXKcL6 dz3֛O1/χ)-B3C;W3[ kV'^7'ظ枆͍n_@vkÊi'>N&fxi@ ZpQ~gZT*B { M`8)SX }H$VPj>$`QmL-\1zŽUgr/oFv|˲N)ArWbPǘ;FݢÉbj9vZurt ~Esg;3Pz32Ni{g6=5ͧTgOԂ\=D.U_^yLֳ<ןkpƋub G,4+/\Y޹|4 \"_ċ9,\4;_^;&Tj.&RR}Dpn{l'HPd`G kC׌{O7@I {KKe8<v_Nð nyx|p:ĩǗy"{koџ8#>q&Gs8Jg[@Wx6(rukĂ.='bsE* h-z} ٗH`Hԍ.O~>`}3(ബ~vJr ez!<akw~l;2n7j/wQ驿koƅ Esju61WSXeM\IWK%}†09 ;.#ga#؀O 8j 8\SV!tI8-.Ơ]:k)&&1FEQnv:s|U#1mxw!,7` YZL<0Mx}gHɞIFipvώ3(MfZ(Az='41{C}B j<[KkH;5|ʼ>IFm 1ᵞ FDiʵ%u64% .9a!CdmO}vTHδI';?80hȥւ RiNQ+a"Qpâwb|Ju #ǂP@XV8%5s>)'3D2*!\jQ3 %/-%P=Qʀ̈b'1QjS.s*BpS )0z,qf#щyd,d'? 
PBxg!ew .)dRiitx3 =?Xx`K?]Ej䷨F9 J"f<+ § ˕-% )S)b 2ۙI"̣DA\TFĎQ#DE5Z 6gU@ʼn2*d@ŎABLp`uhݨQ.dGfFAҽg]PT(;7\.i/3Zd /bN(ߔa_zO*JO}jS'(45^j4Ǫ9r]DP`8F窭*͹rOTLUR_ܰ߫ ڌ "t /!2I'5iKkQv|)Y>Ʋ`eFF^:AghI]&7n+Ɋ&0[P8R>\ŝ5цg%iQ &! r8tu8 3j( ԡ(3;3g3z-ո.w@JÂGc1}Qc F(I:ġRD*`%OѸZJJɍ$RX>pe2݅u*jΡl'58"LG "B^SDV+"D+*5(M= ՟OSc*0BʆR:n}7 tS(^O;U4Z%ٿ+(\\S]y`d+xXm(&8q"QFR4\҃T  b͟%#Iƈ jCQ&"b4  i?FT0$?:I^$*|$Ht~Y\ \ I2Xpr\B`g~+Ǿr~+Ǿ*`%W1('nLL*82 Hq%G8R̻lՆ*Ճ\p|`tQSEʓK$T?(Nic?} u)F{RCO/<K'3OǰXw|Ww|WE_< cB߉-*B"ō:sdL3nCˌ #z#UdTm| e|Jέ=h9UYaĥb*DD#(F ,JzOكRք^zPo µH"\blEHj\;*CD[ cIAUGJ G -DH#LPJ|BQ!ǘƹTH0AQIb` W`XAUC!4;LE%,WdM++8L0]h$[NF"f9!a-r P"E{X^r@56V\ v>.W.?JA}|&&wh@d hrFb"F&ŠӁujZi(UQÍO(aE"UUPL1z~X-*a"@]K`VCݲ,,b!Di,! %,1<2|~F(הe:>ZoTÂRb!#$YUS '!;rwe?*9^*)=᪤.bJQTJ mz'x8)XH̝pp0:n6Mҫ܎8D ݰךa4yA2L1- gS쬪`A˳ke"tf9]6 "ưmm8c;F`^4}chj6dhll  IᔿY SyJvn!׽{Hr};Jgav,4pm$ > HTI @-U˟,Li131߃8k{|@_2Uf0_!^z sS11[Khp|y9`[};&'NpSt=ch9~n>x:;n͆qw֍{74sT8f8ʽt+> +v>18(404v)m%ܖlsPi} n]V1u@K!<m q;yفZ݁Z <.ԃ3@(DaZ;F;29ɞt9<Wu?ÛùG.GH*nq1iRY&9D%gYyP62.=mVLsf' ݢM/:61gK̏>Vɒ%f/gSoR7E% *)J۰^l$cd nגŌ$õ2}R7{Ω+leʪ]!v|*=fZ`=mJfLσ0: J:Ly 4: "ؑ6gLZr!K)k$xab;!. &.se_~ (==׮Y`nL^Dض> \3.JCEZZlvfn F@sVvqWnIbB 1$"L&ڄD1Zlce©T!6&F1<-LPLad0':QwЉ pV:/?p+A%i!! 1b7Dxh"p9<2oҎtAf ^o YHư2R'p0aRh+֤J 8:$Q@ )g)΄0.51h^WWI/8^uRJЄ^J'ZkJ 7Fj-YVDz ;%C9]Is&e$=2%%&X0@QjµYDNxSA[ 8Lb4,GCf z0ҤƫЎ")D;w7. `t (Q6 t'AZIlc2fA'$5Y:ޣ= Vl05Qia` 3TƎwZ3O,S( ⑆%IdUV9#øO@DKęgٿjez"{{G+GzEZ%=Oyf!@g.?5nS[jS G7c 笳J 1d,y<NxR|E:ywmƖ{ƚ%fvs!:Y|?T_(=sQHBH'76am($5Ӭ@^0&k\dEه=I1j3ٖ%ޕѽߵ+_d=_;SʣL{&#`ҦB 0k0ӲKgc LQ|SX\-7уEDKA2"<04Mƃm9 %7xj9B8fomqVunqnv~*$WBrDg; =4~-A5aZ3aqڼRa/vF! rJoi0~6ps*Kݩ4V_,_,{q;{WZF7q2M'=_rn|³NUv~&sW&07Al.nY Pq4u"!3 ƤE~O寙 :MfL`yg_;gqF=O uz! ,!GzG'(FZܟ tƦw;zu~U;3o *N߽}l^Fgŷo_;mՠ" {=Ϟqk.T8{eߎ:a^ ٷ/Fkn@{#K{0u>ycg){$^M7Kr,i#F72r8De:hvw-nd^c:3Y|8Lxak\<}]/rvʍQM'OO_ư??^|xGqXxܽ~nO/eIpf8\Qc+3g;:>oEj^/xx3LFӟb`Рŏ9SMFOx Pd:W ߽_L]͖2 +h%n_λuj,΋N-|M$,ΘUs,sȦ.2Rk\m<]lM "܎KZ[Ha%FJAagEV' y BXVFvf—"nemn10>u3o*O|Gќrgf{y"~gc>gu,,PJO/*q^Ty^Թӛ~<|sgT:zWIj.o9Jj<3sd35p)G~d]svǭ &.M0S8К%fԋŘ"T39'?14w 5 4eX{\'ǼVh!$È-7SYĹD(8%Ҹ6[Ӎ. Ĺ2[äU᚛mmC^e5xQ\(8k(=Pc)J~$W =Z=XԎE;^!Wotz E!ƌn\olxkA? v}ZMN]!G>*kQBvsGX^k'0M%=v7pP@xBy8Rl $z(+AB \s?0b -s < iCj1uGc1}q=QFcV^b eyO߽8]鮆Oy&w`a$Zxd2ac`AŊb!eAQ-Ko?i#l}oL߯wuXA=? uJ'MN*D !H)Rj$E2B"MiW&"*3_*F"XKA%V*97dK6.i\jtFltROn.˜#&u`f]r6!j|gĪPެ%%SZ/;X%e({?-Ma /tYX?@ΙAɁ_ M2 r1Q/~0 UYg?^ 8;} C*Q!amKYmFŘ/;4$,Cp`|@kƀc;~-d%nղ}3-wOO)?!8KrWSF@S)|),Vx'sy-gp7M}l5\\ ;H`Mkӏi\;g]w{44a^g?VԞh4U4h ]]Fy6\t"Β@&(5D (0:CeVK5 AQbK;&NX[v'p Мիu:,q>T!Bf'кA:Ps}a& 4+dz3=DMḆ,ι Ť*$JxȸBe5Jb%nlrE B11I1),rIrXf xU(J/6 q|Y X-˾$y*QPeD 'NG YbZW*K Ee3 B-0䳹FC {sAŔ&r%?tI߁9lquL 1I3AX7!ˆ#ɺq΍aŐP㴢 fqPb-li-SUpcS٥# iBs;'B4n ('vmyXmVANijrv=Nk ywv=SiyD{֌w|N#R`tzQ ;F*;[mCN͐; ?w5N#4 s>zꁴ~'hGx;!J9)"،yR#Q<=d%6+PyB[V҄m`ϻöFu&I4(k ~GZxz8)C"nԗ ˇD<ʃhxC% ;Sh|DpǻǶ*-{/pvsRHӟhj<4srg7>ND>9)i$G(6#\3izި;8h~ZopgO{\V[;3[2^fypcsOc@@5<o(ݥny|_xm.JTSAWpxYVyJ^a:Ρ=!xr`Õr`?x4:ɧS'~< Iq cqIB0ɺa@XcK~< X$ +:bD3 B"8|9Ztf -~g{wRZ℉՚.S$݊z\{SU]0H׶](𯒚;`M0P,tvK&V~jeNhmWD_kX!pU&7Pӷx [W~Wůׇe2D DR9p1-]vqI.c8j--iBm6x0Aw¶nTw~l;['lFk%}9eo>)Ź,UwK*77ochjO;8%}JOX .A҇W?&Ez㧥HYK*PFʯ /n5ֆa^^"+!1M "( L0ɕ '-rL İs˛k^ٸUuv:UU_mէ'к1EkgЈ9Q)QKA+1nKH&T9xpG!IVQZF9,10F r{3c,YZ,;X1z X|VSPVT^]mVWw˰%[hRL'ߜ)昮}.nRܠ} Som}aR˒r!EnO7+}9-Za9 ֛/˖9vGGKd_iW*Nr51mͮuλ4q;}gݎrk-=w!ܪ' y"Z$SSs$ڍ8P EtJnzSFnńj>$䉋hLFn{!lܕ9f2!l !RDĄ@*TL!a3eӇ˖f#̗Too~ϗ޸_;݉</N>3}$ il.\b? 
m Pme2~}LW}WsFӭ(8L 4H%QKbJE$ z&4-@زV%BiQŻq=ju=*mfV0[n>,?IU-4]kVo,vV3ʨ1@թZi3 ˙u3' HrBBh3x 7KuorF*,CFt[Ipć\:SIR |~6LamYIiL^#V2X,֢#_2ƬY.i5]>x)31NmԧUqu[m)_mp7R9bٷF޼t"+D6Ev%f$2ƜhN[ndm 8; xL ݉k77i;~íqﻷ%4> $"F)o׎Ҏ6P%= {jp>7rԈ8M`fa 5[{ptf0:7ӕG4`jEZC+Z[ag$h;2=޴p{ HCUNPԯ yQD:P R@$j31PZPM)x GWgڲxM!P`S%~ztc{x/Ar}z:gVl:9Lx9 ÞxJS@~ĠLh%F X99 ʭXA:u#2tXBF*lu/*ly,hD>(@LZiIR`RT=!ck'IC˿X,o[p XA9 ]lР 9+4Ӡ035DQeį0Umے$=$E.E"ٝ~7kވVK)RhSe:+`*Kf#KEHpGV:u5ʹg}Hٯ7{ Eyc'j|h'.2Ś]ŖfJM3VԞ,vpK*w_cE0siE +K4=q;2קQj4 5\X7[!)7DN?~Vū~~Z.}Xכ}`XPu[5f@7xe$h u3w_bUM[ :BBtF'bIatz{CMPJA7$4򽧆,^V AS)DPP#qhD՝}H0MCy5KL`QP^ɕW6"FT]=@q DSr;Pj磶(iTu-f{04FKbjfc# G0cFht' $9EܙY+fߋW1zyy}n gxA]/tL%E|@]Κ}WKU ] l<>܉TFUˑp9>e 1V(rL:ouŪ\>%h w~yiغY?+c WCr}ɺ\E/"Z!ǫ3\9=G1-c̘X޹uSn1Cw< d L ]šڮ#pWg[Zc\3@+vTVièPL R }ՒVl.!R7ir ?GM. cR)84\ "a^"l(C@a,9>D%`~-m~pbxR]v1H0Y !!^vWJpk<>abhT 6 $c$@ z8򒡽 Jm5hQ*Y.6=^ G VJJcS%NC,*}9h/[upˑpQ[4^~2_OcX*kq {vz[55oǛw|!$~y}QeW񣿟}ta/_!XPu -~?Z6Ryp_OaUYιZߴBOņ)<'xhn"}{_)GpydQbEPcrиص3q/ے3Yȶyk*.= nSwaE,-4}ۜaDEU0V kEЃ2n/NN$oyDc1(]ĀuJ( Vy`Ո ȗ3˃_ 19l}ɠRWZ6EK˦|L{|lŬQ'1@=9Q>$-c̘(͓BifjS4 %%.5OiU#8Q& z#M+{_iHӉg38I|Y- D^oT6sD<O UB;?N(@tB] e>sѰg:?[nDJ.aW( 8a MwN ;᱄pAA!Z+3S_Bx_ݱt8׉_)?q_Vºz JX0kAMm$5Q,00:c@erAs &(t2Ic pp#p`h0ԫB6"&q/5z5gA_}8 -waaI[nȸ 3 pI[caӜw&l*3s7MEcby`.ͺINK~66lc75P*^^9퉷/JX<"^eXfL[vf6.U풾? Nm1Ngý[[5\{F=IFkfńڭ)S:Fv6[1ڭ y"Z$SnݝN NN?A3iDY \W PiU<҉[]7 xEB`; ];(C5"쑳D3wo%\'u-V<0 4}ERa5:8GDz~ *Ɔ JJ*i J(Gc[/06qK9 ))H{40 4СmCڙ>tY$sOct0)t wNiӉv4֡R!֡.>j:9^nU!҅RmEz㮨>༥)p^;Z-E ۤ:>,CXoiI;b7ul7wӠ9ϦҢ-[Jkx2 6N=WGAݼCPH}Dx} z7GG2QwcW{k: [sZچb^/]`h}_ĥ,aS$X/HP`5\,uɰUŬ:H;ńqB^:KfԪե v2TtT:[i8k .Ev3ZN}p*#duTT_|8Gj9ߩ8$pzR~ 8T;3[ {w*B,i&NC[Prw u  T\"?!ϩJ2EЄ:'OCq(XAq (\Jr-am㍋;Q]\%FM #x> g gc&=CwwT35#RB}.&uȾ? [کDϫS0 kͺ}XRmsNjO43AžSi!7bDb8}_w!$ oF=y-7EM M$-P BdP0Q +/+kQyY]) K+.AR it])0ۼ euVW^F[^]@"*TdLiWZ7]S 0]8N0S 3ձ/y :jBnܓ!gvG:_ec`$Q<>$(}"8że 2zL8;[`'̈fK6ք vXޘ*N^3>|1!^bTٖ0@bL~㗑|',-$ބZ<ٛl4h iM `g'XkxD̻w?2y s@9#˧ND2p_Q%8 ~R䓔?  Q*zES3P+Yw3=j!ΰ6JĜu qȖT?74! t%r';]uI.NgX;C9e{ PԱ$ѽFO'9M,pnRplenx|XA}MS Z.Uڸ WX>(: u^?Ƴm ѣRϚ:lwVaX(Jn;1KiJy ( zg NqINS7b4wa3X AVѣTQa $wD yoy#07 ,Rf2 {Ȇ7k!@#*h(mx 5B`Km`YJ@ْZ8Z}+c]Uyzl920|'3Ϩ +r/PrlP.A E!d@2a+5)XFם{=il.i~.ql~\N_5*kUHQQe320 X IS`F"NՌTvX3!i"xbzin20_޽vy8#5=Ӂ=]{^ƙ{NOYfcz  86THZw0w[UJZ=/] R$8 8RbGqysR*oL/#z0k36뫽x^ݟMڇyy  0 7ܣ .W,*~+g<=b>@FHz{jw[/D*7[g?OIX83[ tЖ4!7^ln\Ց`zoo*?s#gc.Xa>벵Q1nmsu/12 _6VM Ļ ՁSs5;hS }3 բI<5̸d#h &~S8P* "x#pFFgWW1Qʙ:V@Y$ i((sڏQ!aq*1D@0Vko3o0hZKLǾ)4fL~&Ji+ n e'~GP Ɉa#i XRDcL3  JH?B}%q$Q剙F+C/)`Řv-`ԜMNũHeiөh2V}(*/<űV徂syFԝh*$=.S&3{qyH=ŕ$}d^T|]Q3 ؋7P9;CYwx;2% wOwԫ79rT}-يA.jwc:8vzOϏp|^_J]iж{ *}q;I@kl(_:dF*vP ѦVQw.j_,*` Y+qY_sĬrkv9K Σ[rhO8n|&=Bev i1kLJzlg,hIOXZq+e68OH zT 3ᜬsZ8Nٗ؜6 0_ myr]RОWIV݇U%lV>n{x [N ĠorCԺrC2Eui[Um@-U{huRsڞ߸鬲8uhoTpOΙZ=s*Z?s9eaV=RNͅӁ都HXEK,"q%yaM6.1]>gj[#N孒 0SWkKdiRi܍,3/B.S) iySX(xD24$2a D6%<%H€ $fBE"Rucڶ1ZBĂvo1=cjʾ2* 0_F+ Q&Q%`Q3K"TZ6 JS#N8h'!( Sy(t~Epwg';T4"$h^~zV0T<0ژ!m>%,3Hl 7] A{hπ E}kp"074f88c!vj, j ň<;-9 lbm0B۰,1˰ &d8Ն3+t8qnT@ޣB_=Z Gk2O!xZkWy9\2 . R8DuݱEi=6zlTcŎ1a>T0\A@BAH`A08dLYJb%KM2Xj:7y.:VN.b *bA"O?L:pmǜjګuP(iXDqR .(B΃X"A`_"IqU3Z1UY;_-n8N2Z.KFGDmS]5Kb0v+|SC?A+ݐvޣ?Ȉ/_DzJk΀hr5.9fN^Z)+5>`ujJ8.sH^,S΀g>IyP&~H$!b1^XUZpeUԕե-lfTBp]7]jQF5 Q{:(ׁ"?R&<s3A{0'ю@:tj5WaB :  “H! !t5lXB{GN)FgԮGڋ?@ȋ¤oBבJ뫽xkkowczK t={p"{<ɾe(Ro.d>@V ?޻_n/t<_,ׯ߮&n.Է~<=`q]=^MzM`mڍ8K!vG ̭ɰYAdRQ&W~}^*:Ei::tTԡX1P0$a q@NH$ C@% DBF98\͹`łx3 N_.ltXg$+],ϱDHEEV%*v`T ,R  5]@($9&$!aT-3$_ĦD6t*8L5LcFj !Lr`#QӴ1$@$흱{{]0%r9b%p6Dt^]] C%wY&3˸ghɌK! 
ugXxA me\ aǞScBrgUd$&YY(3z(#"OīA~]lGimi[X.Y%˥z!5ԨdGgs/$}Kv.’sVyIP0>;'6 2G678zMi({bxN*0'uzN3vYY}EeLj30'2 ΔMi2G e<-Fb,ͭ`A4Oe =b+N?-nw#ҼYpQbk&8Ǭx+) WE$ F,=_l!+=HO>6>bPzl/JiϩSz@iHڼ䘖q|Gm 7NGj¼ cچOv87#-ہV: `A:绍#ކЇ1DW-rful3p,\ hs3]6Ȉc6{8ml-MX[AK C[ ph_gEJD/^Y(@32G~E5gOoKV P0OLzf8ijV(d@0k ~DLHΰֲ%q K]U.45)_cAYap Eszl ÀH2i9M[E]{ٛʯ4-fwmmO V헽?bA %* 8w(gIփ{}Og|z>2.vfn~t&nԡ)/fCp_ OPk iC,4]#e0V~%y>6^2tK0G%xeJamȬ҄QAҍ>V//_ LY\0("ˢ\|'a.vo#ǎ&@E>!nf>W왛)G&~(*|fɴkLy5x?7ysφy;{>da AnOq~crfԼo9{_?-޺C? {3G$=m_·M_ԃ'7ӿA~Xq_"$F 9Y m8΀w~3w!{ :1'3paj1 !lU dt1b'?Ȑyuo z_ ɚh}iq 0:|jXѿD#N1[b1NOۼ)[|]?fp=SpgEC~ |b_x{8uO<}md@g9Z=)ht)"3V1F9(4i=v9֌u` 2mוLٽ 8ZZp2dh)G %mrh V/DВN81TR?lQDhHGBgSXӲ߸p OſU)|-qṢ_p:yQ~"@PH=JJ$[AJilz)DT H4s )ʶ"ՙ̚޺d7Y>ŋ*;Lho_ԷlH>ɯ;_,YTٙ!A^6|1M@xz8T qpO2#\ 1a,4(_>^6|IOq ${bJ%Uc'ydaDof^PTOY]@eYVe^ZqT@ sx}wۀ,h\cvl.Ȓ%_$|.u`q3ȂtHgR„Z0E0Y=$ڮ8#+C뛫X Sxfnj~"U#'u^*Ryˤ; {5@aI@tk]jAkIn-Fxs~`2;8su ֬B >hJ燙yh9 ǓSÉ:yڣ)AHn0[߱NqRuä(%iЋ(D 5 O'0Acӊi5RJKR#<6}RJ(z@@%"oYBq =h/&w̃LBtu?Gqba:҇in_N+/8`(]kw]3C̅R x2ԧp*-wNERj}|_VAb +gpgoc{\冣i۴Hg{[ߙQ';bt7!L ǟf0Wxus>w0 ^^c sBQLv {u,G7 a_s=`AtRp7w3;w<N\2!T2c(~\fӯɠh&Q 2v_҃cǴ+')Sc_NgXpuW~ݸ[B4w\M&3񎏳 F:\,~4y2)e(LOGʲKO& |it:'}WtH Yߵ?Cey쟜yp@ =]6ӏ"a7kY #̲Z3Q^Jv,~:ϳ20j592)` Apf' )(\HvDJ݉OGxϝއɸCf@]2b6 .8>+"?G 1c'؈`sΐ1aƮOgEOn#@}S j ^d'55q.uJ/,8ǟd40.yY'w5;|Bv*?l1'ك? r>p+-=r`[a]oHW}z}>`Ic-do7Hy4]$s &iW]UU؏pegUD r'^!wTT_nlgJnly~=ꀝ1QZmK*%K{$8E&-ļL$ɐSng#P0Gn-Lc!@ %Fjy4Kv?wMu= Kh)tIoY'f/Pr2U75V=nMPwU76t+4]f(aM'7>T զp:+\[s󒅤=t& ܏%ji:.oSFgW $ԁ|ӑ6^6%e).'@#‡՞8FO>S(]gPN\7.)4\]Xp=Be Hv2c@Z-H,~ˤwI{R5g9'%1- 3CSNOC캙D6{TH^sf˳1#Gb_^2ͬIy88 jS.j@aÂԦ?27Z;Z,P n읣"cơKyPo\ G< $;h=L)WqeVt4-)pVΟTPG۵֞*%حqlQG ~Inя@ TBYw=)71 )z&|ڀ򳜴 yP? 1 >?j}~MSŸh9G,RCB"^uO?!6AxC(b=v,܊ * ,晈P%Tx9֎뽣eX(pSgҲiɐ |`\xMrdDZ:jJW[=BČ ik@CGOjH2}zQ8>y7t~ɣ=9q :f#%sJ0aʮ :P!tBE}A.K2$Xv+ڃgPdO^m1& aSzZ:8D-dg`ٙ("S 0 :P (rѤnЮ?g]-Ky_: >1]=,}ަw/ S 43sci lRlOD"xJZ30:`."j9:>)97ʶƾ_E:`g2@tg:DuAܶEӬtIRH~WrE6Xa(`GĚؔAb{2 ;Qݭ?ND9)G2v߳;)vy埚N-(I6XYi[~"S A 2BIMA4`4GN@w| a(1ruNTfC{h{t3X{rm@W G94@QRն=p14pYm&4,\6_8/7~y c1KzڀIl]i}4䟎ӣӣqQo_Nb\k2X$zMZ>_Xt_׷NJf:#?o_b}):vV%Wc}z#'Mwfy)'0 @(Dp6=YxNjQ}9%PAVv';Ϗ~Eõտ.XvyrGG/Qjd6ژPOjR~^`vuJV !&Nٌc;)!^˜h \ ! /%}˛!Jm[8X$xYB۸Ra{&McjJ$T qֽkC0D鞚M`kK{* `jQܵ' \ѷMjY[ ĜӦ4mvr(~_Q*i2}͔<6.9 sBz]l} 7σ1>Ͳ0j`ˉWuk7qrߞ=[зR@0wB-Ҏ0E{(EQdv 2ٔa&p=S |2Y'ʴ ZI¶;$.11g|@@'yDd5zd#tHf2z C!A @L| UD?B7!ߛľ z< r$0IdCA($BHb ?^/*QlBvBX^սʪV{x`}-.gx +L>#yU_ƞI#1e& 5EuJ.5y^:P "I{f Ot5RKz 0`Mw .gT^gHPơ|,/-+ U+&xr7֧Ž10[JN?yId<_ [&JI(?;qyr:pۆüjDr8J.jk5 R(= ]3 (ɪy ɉΚuQ_ \=\PU/:]ך> P \][=fCf ]So_NnbtoX0PtBO<;~>c^YI2{N&)'.eTTY, A:`֟ pZEsC K5!@ ZN5n_fgOi.K^8mrU4GdvccO G*5 )sR`Cdc%l0Zҋ,#-JA{B0(1B[x%Ek'xz[Bڼ%9MSkR0lkۓ11UmObC%M HJGU&76K!!|oG$~ipүNZ0!CW:e4O$ƒZ @c3!Ge{' ;:<ҕ <5'F@ᘛyϜəz$23qYtY]v>|V(j9CuyRލ~<)sĴ8s>gqFz?,5ͲKU.odߍp>}Aj.lŰqqq@ԛZ׫N\ëy&}19H5WzjT )͢e==˧דJ) U %;8/>;~^(% qY `1A1~p(>'y*P"2f3ľis\"IOq(}à j uwτͻ0g Z6bx*'! R+RLD$1QaдwT XRXߋ F4qOLPO\Rr*wH AI9A܃<:FH?Px@@BQ;DqUM4Ɗ )?h<""{ i*F*l7O]FIM- I0c $R^8_T34&(cB/ |z(aX$FqEsHH(ABsBU!xw="OAx#nڊ&^ښ++i{Bs!ZD TZrD28 Xs@DOXhmi}8M֗׫(= jK'z]}eGhz4х lK.va~vI.GYQ.c+>LZI^c}+%M=zV T7Fiّg/ lZ[F侍 m%xZ=f$Go&O`9LOGf{F?lVlU-UϣYXʅmOȚBoҫRZ Sy΄5RI3;h^wtDPMC,GS믛h/YfTџq BUOťvkK}DF.̯OWK>KpۉrK5(9뉕KG) C]LkJ"BNڈ1<$C*Lyh0>|"9WtmݡZ^mX1֊YV1lcP37{q /$1_XRǎUvaEB"O..Hزv |aڋѽ{!JRL>"Uy{SF$=%F#IksĨaqd푚[bĻ(YbĤ~FMm`*Ijd;Yt|@}L?Fi[=˰NROeX֚S}r rTY;IJ{69BEMՈt79|Urh4nW/,ߘ;qC %S&̈́s:egGYVTf_=Euy&S _fwZwH 0UZdK^Q=,%L,fm砖-P12V:Qh ;VXa(kuF^P$LpU8=ssHaZΤϘ5LTռ(ÇrpS~0kUˀXGSSL6vʹYVmtF`.ƿ1_b9jxs6~Wf :ws,? 
+ޥq.)`TAɯn` m~ V-[ҍ\s*D)nov$ ,5p=p ^JA EtrD B,8#7MhvkCBp͒ZIc=1MtHY:9T܆|"zLi\/5ORω(N/gD鹹ʿGz,!~^:cZX*d;f5iK;25-3;vذ)9L}W[‚UFo^vͅ𗬹rhs,&oteT(cwk5,7kjreYƽHLC1TO 0<Qz}:z?z:l'&H9a%a))'{ޱ~z9I6T"/A:{\S~]w#kSLZ`AlC+xtL[^kmtCbtZ^Wt=DoJ2R ){8o=CVoO Y Ղ:w53#Ӻ?`I|+q;9;& c>j~L E&Hܺ׫;0ĸ<2 y:rC*Jk/7j1_͹nϝ|R w>kڙPG @*/,qJ aϙrDž+H (cЂPcÑ$(T2 h7!.^~Jt]e7:|J֫%s#{NBJiuA!hA+I!I&9" ,ub9jM\-V⍎W~T8笐A2-%<2h03r%Z" A4! "#v-#(B M=n." dLH%z(i #-h dGB܁)غT>;$s1E9 .jyI,UaR+``ˀl)8J-G<]?/qM5xz1o^IED  : pYIey  Z^ƬBs+A%J;Q!1X o[It`i oS-&)%NH' VaIK4Ģ#޳GSҎ"W |`vL&l!9ΐK6Z8 ^aؕลWE ɐ XGH^_OQl}QRV#wSyI[^>3ekǵ/a_c$_Dɚ_o?^GaXVlzb œB?ruۢ3;5~q)!=ze,&j<ͬagbf8G@n1F$ 2/S`3~ZȎd Sv#+{= 9UTn8tTuvVdAwONJPXH``Zs`Z\@ qDcCTPQMcm3k)H~oGAk 1-fBH@bS!K\`8p8bb  $ 8㢫#ba!)d-(z^JޘV<0K UN%ȁg1(va_XGY2c \)`.!HJ"!BxDh.J:Y(a)a2O"x2f vcħ< 5m#h{OdDG;?C<^βұ$W+zLyþM='΀.ҳ;5{l!U7FޚVo|Uλ[],qfu1ƯF?߭V/^` Zx,&"8f6Z>Õ_0˅7XtLaRݧÙ؋Q ߔVV)=[dcԶ#,I Hl #Q:V[Da 9\w>y=f z2>‚C|dz 㛫OC$\7W(lQݿ3IvRJ1< aGFA9*ab)BX< M2>LwMhZm)FMgxau'Fr'fVn~13z]:Lw^o2K }-oJ/BPX3aQOmYF7K;\U>\>˹y,P͛Do5O]+ b-G_(e/EsTP qUv+GJ81e- 42vFE}ZCY^.: _nV)bF@U۩f`,oqX߶ `)9)S#RqJ-ׯ^=呜mloӋ'B RL~[\yCK^M Ɨb29~ro_ِVg@y|/wxUύ^G0_r9}{L(&$ig{yUKQ֤0Pz'!,vM,iΣNaj;;`!hVmU(tеҜ0:*$=)BR3ZyI-a $ i5GHڴnu/I"pHLCH$>K` cuɚ2Ji ZJKԢv=Er6(m)h!yHtG͇8hr}3Mbҩ5P{%Z&URL= '٣Z|PV_]q;%zM"&Kag#T]eQ&Nlg?O׻k*Nn=§%zf2T~-Pl]s2^'pP6T+}wdz+wo $U1ΡN[(d V \&Dmly@n9}0yT8:Bu-NGLPbLB4CX ݓg $!%JN|:I&5QʂA}bDFPFh(|1ha48h cp!?Hs bVqgo:tD!P4`…X-X@@9Nzh@:TNvq<PǡԢGa EDRX[=(cgcʰDA w<+4aKETEJYhL@h A Ug 30* _%L :x=c4&g=~11o7 Į b%MdF!0ǭ#&9 O*Q%9dWʔǹ 6|+ g?oߥ`IQoy՞"Il}ǒhu{~""-Xכlo3Hخ.މ‰)%4DFuI! : 8ٳ шZ8[FᤍR8(]`v(ğL %I򷲁yf,j4 ~,)=︕Vt]up@lMâ Mg9C:{jMN_ Ӱ ttg/X_q)̵Ei 4A+&i'^ &3ĶpGt͔Na2pEtT~|uo(zZoleתe',>ub9*8[Φ0'$+2˸.Jb:xPpH- C)p03U"wR!VhcWF,-uzH͏%b^6tۍAs_t>f\mgUszw#ߓ(ޕq$Be1T3Cl`3;}񮐧Vt7% ;ut <+3̌Ot/-R jlK^\zsݟ*>aVwU~RĜcrDˁ6"e͞V*pRmv q9!hW?(QPsμJ!+PdX洊Ч1tii>0RbT;mY ׍Ma@@ K@˻pzgr"n(nhbpB.@ݷpd#(6!.A.5T=Zu-ZDcwjBV; q5!6˺=]ǖdA$m `h~ :P̈T aA4)q; \s*$hQ^)蚍`H*@!ZR5ïbK*WͩbLI* ŖJѡzGgEjwT M F8nMOu \=c=6aylJҸ")0N3!w RzS 6O 80be4(cmɨ븵 ;h[:AHt5.Ӯ ;y>P; 1nzw,zgWe  (q N1-qX)#'#e ĐpA&D6Do/bQXS#ZTioG;[md *H\WbΪg +|׭k~]\-[h~HOuۮFRʙ, ⿫VAqfDk~Ԍ2@-91U+'5TIH)ߖ2"W}iؖ4h3b1Ͽp!@4 (fAaKz?Ԏtz9ǝc{M_HFS(GNq¸>IPY-op?/e@U% ~b5曆+)CR^'%ǓG<{Ro>2oQ&< ^5hsE{%y$ Di3Th#JRldӔt`AGupɁ*skJp<+MGa'FUL-=e´$Fo^3g˴@x;vlMx+tfnI _(sc AX'|>1:庁fUZ6O?ďן^Z]f-=+- 3⻝Y+f]׳ּȉzu;'"N5Hu:䫜6Ư2 ^/="ąǟ,\\]e-ϝ1m߮#6/y&u~yWha˳Y\SƹކkWܵ6̭TM.TnIVғ3ke@OeJ3GDQ0{ktX'\-lXj_|Q"?]tY02.U5ao``oפ0. ٳ`!7vP 黏 \5ﮋ]c3Z!_prŴ"i:_ϓ׋]P|.p8ߛO+WIh%ՕLT1xK&NY}ްa}9[7_} =՟nngjQD>2q׬Q@.Mp߶俢{jBe&seir׫7ƞ\|?}7Zѯ|z_vvOvcXc} kEnӣq4NQ9ڧ>;a| ӞY$< nb=齳l8ßkWWtzm9nc3qkS,J93=;O8Ge]d`v>X>==sx`H!TS*o^=U4s8{W?wر,'=cgt?( |Pp CB68CEq@XF (M8y r(-S?|s艋‰ -N&ەd- Y9z~>{w^de{>{_ΦnˌE`^ލ%nʈ5єFM}[ZN|.X?B':.#G\}ӫXPlUZ}X:tiSW79j~9c%#ǀv_%ejBUvW S;X'`+ߠ}:oyQMU @Mu#_g /%yE_%Lk{ {sBk֢IbXEZh*K8'LX[w!.ngUtri^& Մ lbu Y2vQG jjWSQܾm "SZS w=^#dmNsӵf^9-1NB ݩK^5t&/zO6I9R/#D5nl{р=H,mHhct~,^x#m& }.)?%$u:tRD*KXI7cRKr. 
(QDf!]tdO[tfř"@ ]UT?`wJGm1jPET\TEKe]1ȁ1;زBlD`bar K+3{|6ZMxSyY5;!@ňi;] zGu^@{ɦl9 /=8=d?^CZsTБl|jtm=y y'@6񷭑0Q!{``єzzJ/NI麪*#N?u"ΐ}gOgdmQ3kFKgY86Hu zb4Lr3[Aȡ5~m< 쮀 *|\k'Tsb  hFtRhʥ=(Op.- | @}:@"psaqv.\Mz"̒Bxg=Ś[ Iꔳk_w*R'SU>צOg5iCf9wa\1W4Ɓ]ӓ0Z-'j63̾ .I[-]C%bN3le*jh9ߞ&T@J -3Y̿b VeC{C3 __V TPU.-gnUFtt i/s_ ҇Ov:C>{@.>$wTVTB t }v+Bލqjh알H4C#:!~ )8ᣬ8TLeh af[wcagQVB AQfՒ5|f[TDB4V#IV:S9o1{uȺ4V2fJ݆vScz0P1pkÇN-~Y9W4=lAV /ƵqF윬|:f{.Ƙ˵:*'CPbyַ4cDfhibV6jV\{w=nW%La!AolDN%QO)}o>I0dܺunuխblnC4w˕+Lqܑ!&2&n4J![%}+$`N0T5 > "hGΒXQ/4ͦjEh/F)w}v0|I.xKL1:p=І"Dot%>J}(%x:DW:#8X D qX+:!,&H qz{ԟK_^ f1|?_݊^ V{1 %9aY8?,u66}ʏ6eNڛͼE&)+MA0%a0J`TEII#,0u;-=`Q*h!mP.((bbe&]Z&] P%MZS|3^WB.5U@R Fe>$Y)`]纉 IM)"UfFHcwD BQN( ky&%$Buj$??6TlSx,wqFn ? ,xA_084;"0%evl*.N/92܊J PbxBrP" 7n҆;h:{ eyvLܷQ(_qlMwVbP O?\BJ{M童ď߿#>|ŗ] \1:yt6W1DdX+G ӛ+.bMʏw=~P>z9`bEW[3_|>8ƒSfqR#|5dEn*.Q .X 뫕|klw8C]!OY6%Z*̈g~͏,oGiVqr0PUHbpۦ.+#U'Ѧm'{|A\{Nx~bsc0ŃAFR LKyJb@'|8ALSRQ&c`M+ H5$_9>Ӱ;8բ |e JT{4BpCA8o 9t,%AXaߖ} 酂40:#PX97BK[!? *??KQ-8m'߃4D/]dT1C 0C.JAWQEDhH32TܱI@ɼČkǘ%Adm0-X9Wt M0hIF=ovn.XƭT,[j]1(dvsk!0RhE1G"-7 9G zgi0$v ! 4e^8 )(6PS 5FMD零{QqF`V1eo`'> IXI=2^  (eO[; Asɕb{Ν0H$Ҋ)"B:x+J{ NѲnFcElhfR2"qCm;Qx!bj)9)ַ>yHvPZ˵e^eAfCi6ѡv:=u`n:V;!;cTrwP{x b_|҉X!ECi8;\s-Ru5Crt/FKjXA#^lzƊ0jҝ; ;()V#+E}gMurv}BM'B;T;$uIcNY{S;L@.4-%ꃍ54wtULnFld7F}Xm{/-<ŶO[b[}~ NŶ2ae[Z<*4&9A^M>s6ŗJ,л*5ބ =so~Ty#I^sov&!ttˡJU!UybfϋSXAfl`/]CVTl-^IXt,/Ir,^PxAƈ1*1ǽ)L˺K ߡA7|.*ף>^ROu3nfphmp"- Ͽխ<@Ϸ~_ɥSG!W4瘟ϰS/ ѡlk3.%[seُ%^yE`Vإ3hǓÂ1=ԝ^Lw"Y!}?h[Ϗ{{u*'&I9;TaMy?>xq]BЩ<],[5xXG'wga6lweۢ{)k;Yx!LQ>yknl'xvjmKcQK=4yҩH^u>jdI,z:6'wG;{6t3XVk6\n6Q_Er a-z; Tep׳ӸpzR#3W=+&q ,^rXmЊ LE<ĘIdhraRIR QtbCWMq)R9ex0'_bcIp4s<9WN l_dR!*SdKSy;[ hk;)Amͽv~R9yKObvx~<;NJZG.+>Iv/sٸ%ųE.^0q*Z\9rR!g)&F:uUUsN%:P,T,"#W<yO cR$Q-F5G,7;yt哋œdDoMpHL;X?ڲFdEbE+CXN'Xao؅.phcm;J/kB"@ vRefh/^V8l2} jzp`*ERV PɌ"r x)6ppaZʰtm:˅R_ +6EYa`Z^(+5зtLF84e_վ;DfgdIF0F,&2sӭӹ/JÌ.~13$Vyӑ$g!W!)`V82!d\w_Ui3jU zN؂JݳL6V _vQ.y֛@UۿL/@.PNs2qӚnʂ_y٠MEfV'$ g]9}(agtLscJkerJ@OI_:YUnM * m]:Jb&(kduɄ6kй=;%:|~9NJ$4=UUKҪpro;T8sKZ A(wQG/My0T|#AUxiTqeXEv (op*ǟˍ ҢawuYt-g-SeW<˺Sʱ "蚫\/uE2$8ױK\J皤/t) (FJ6Oh%0],(wmB"/L~]zd`y7׃ t70Ie7گ()\L̇4;a@tbl,Aw韟}`i7Rhz~ C{7n۰!@(Q /FԠnEfq70;wem`X,,>B_Ƌ]͉iҹ՟V ?_??}]j^=~"c 3!7- v:cLԑ PQ!5o`Fu `iAs}dgo{za1XBpo,߭೙͓ǂ/aݦ9W7fv$_{㧻[F? "gC@1FP<&a_ߠk(N0͙ƺU>mC1NkAhN(y[qkgSRV`e nhѳ18Q4Aݦ7X!0Q{H2e%z6"͖qNFkf0AH {Pb90@^G,K'+Ld#6zAC@+!07BAe hS1APY[bԑB>j}n* Zĭʪg=_mV?ywS-WtV\рQ|r~瓄;|~/%fᗻ0_gp7*l]IY˾V5w_y/E[V>PP =F\ H"ba&D D(,cv+֥t(A:{ _ޑny޾ޭV unUqq#v}V*V*VjVj$O\vl;6,ynPqPS|PSqP3|P3qPsqPﺡV ނ@)* RK*A1I/0GBsZQIy&~LJĹjC LJDйjCjgL7 \D6BNJjOǷǵms6 G_5mtnvaP$kז6s{@91d'- U4i"K" @~%Jjv~8~1^/z+|{;?Uq>~ͷ?>p[LilfZ[3@oeZdzԞjXZ=xRnA2ztj!ڥN<Z+vpgZe尷DY]ݲ[&IY-3 {MN6zDX.x ok|]vM\]mHq ~nnn{[h.ެuM#Ib`=~bK{i?o|uv/~HA2[fQh RDg=s>޴qvCB)}4vQj RDg=.4|$6P)3wn}Ľi5m y":DTTiXeu1h RDg=.}R{FnëEtL=foi7+ͣnC{E=`<3݆i&j1$䉋 _}s[Yx9ӡ RDg=*lq9؉m y":HxUzȱ݆2Hv{K3"Q!!O\DɔǶ%ZOu$(%m}rAUT%8H+0n4UQEو#!l`m$Zcm%U=##>j{#*UB ʔ5Ƕ5PUU598pwt{n 8eG FO͚-LIst{VPf2ke {(2k(S]f2kYs$s/&QYڨ#A38̚H:2kYLi}|5.e(6H`ePC8ʬQfmԑm=dfMZYڨ#:q|5ŭe ;ژ@5ʬ:ǗYS)ʬQfm̑n_fM6Yڨ#;̚VPf2kefsYڨ##̬H9c2kYL#\eF ˬke(6H׬YheF 6>̚U֬Qfmܑr̬Ye(6H3kQf2kc_f-ʬQfmԑ2k{2kO!2k=`f 'Qc)FQGǗY( 2k;\LRIrd/G/-Wu>g¿ВMIr{hr. 
Kھv>i=ws=koҶ|:Fֆ[Ӷ@^g|Ҧ 1f392ͼmu[sHۆyļaVuBtVZp-Pj`8{SвQ2ş1n]=Ia[(r ǰFm:=mCifQz^1\gcP?º3!km k=[6 hTN ChL 9t K pB=W4{U3qļFaE?\1mVT"TAqP;h]%kEŐo*-Fjgsz +tPCfX 2\҅߮.v~[:|v`j>b|28}= 4S M5 ̴^oVb;?6'խO"ۓ+\{ryRiϣUYրQM9g4hnF*ӨӵDNh߾Es|Z$6rͤ CyV7so*!q8@ެ3>l|l޽9dOq9VÏk07An~T}};^OnSu2g71o_TM=$^x\NPa)|w?a3^ml_OE]-ǃz5RTR>fyMm텯B\Գ|f̊_ξ}yVFgů ś wne@͢{7ɇ'/D j#%< s)A4N(լ?LiS[FƄr#0Zbi!^eq9ЉS ,u|[L*en_9k>nУyI^bXB01-T4J4e2Y(w/1-t "!ЖQJ gRX( O2b^b>EMCq\xf /1#!\iLxmcx21#!Y\xrxqa %3)J$fsi0bxqa)=k^&<xR 0ᅃuhˆ%CB@&<#-UK J] ra%CcxPiJ4l3Y.(w/1$%] 2s*<R)<\x+/%x# K> J%%m OÈ%C@.E3c^b0TҪp%˃",R}[( OhAōCRʔ(&H29-%7*[ \xx<1$lƽa/PclOpH'зe O3F+%Cy8J F2U \h"; ISRĐ7% Cݙ6L^bH8p:9XG %+%c˅*>ōJ}ri+DxPaNR@.<G"xP2Tl ΋%@ ad“x!)P%w{ 8c$/XZ+2sYK!`(Z{ Cxa#P^oV=B?e7)X˪>_,^-śiSr]qT.v]?k|v76ŮooEߞphi۞@/VnN']ֳ?>ٓs_]t?MfE.^/}}[n骝Wͻ!u ^ Jw;~7>{:y >Vx9Ujjx]MnZ1!b\j|QCH?z~ﷻ 쉫G?p!\x8OK}/12+s ȱ%`(N:K (ulLx%V Q 6O˅$;T\9Zx OC[K Ujg4mۗ pAK%F.!#9ZlK I -p.<6h&xĐpGEYIdb5d_Q=EEm&.$f8v*p9X1ЃV%ƣp3@Bs l}i(Nj'ԿW}3bb+b3.^UQ$+ll?;6 E`^sM@R> B{!:qvk4.qSTǣeH ƣ_TeL~LCʏyc<8ٲTGs_T ˾{?/kS '|2`zB`i.\9cZ0cͬ+7| V+w9/MRd:K,4$`Lj+~18ZY~9ҩyA1qWE/fij/ޅI6 A;訢̃N35%L,)`hB&*a5P)rRe(*0&`"^2fof3&[J99Qk5P]m b*$@,,9u޻`2r(  @#XH/cc),HlFL? A,u+@qqc$ R1h.mpԈ2KOnb,SNH2c`.qN>Z_mkB]>l+|BO^ Zs,Cz5y}zXO,j-0jq4T}pO^""G$OWLg%[^T'}'V!\(ID leY R`8 ^6^hCEXXoDjBzٗx212pM6m]]ďBQ;2i|20kdB7-Ns֋@F IڣqXĕ$Dc,-t0Yz Se193JKĕڸWmh픡!`"hI1}WĠ[41ˁC3ڞэ?o`glNajmtjwn<;.J螹ى5W8ه߯qDinI K%xC>]B9e"Se64qB,S\M0e3dd\b%8։[N x8vN[[w+~ވ3ΥFFȃ`9!b9J")k< ތDe1JnjLR 7ے[Y҃YJ3RZLK,.Īڹ{h ]kKQbRBS̥xkmvX)&=H6SZżĔQ( eʒ)x UmPbaE&-I *['!Y5{•IHwOBVL8ћ)9R6'lHNB͈69MY]H2$vMvKBkl N C '$#tFKKc>b7-% ֛19kU3}NI.~ ^3_Uͫ@R 3HnKrGAĪ *`^c RCLu8&p6ͧ-)[{k)%L֚S^ZpAaSoaMּfF҂$kg ~Z}C~y#VoXPvkH3=0 ֋]EG^oE33Z4Ze}rUw-E/?ng7zpэ5K6oN:9&ھc1HЋ8YxCT: fq-6ԕpL0xi)l{Xik,CCz5V.{g.mdN68[[ RD;vn{Okn+>$䙋h+ڍNg>vkA}.h5nO:$j}H32ŵT7//Jω(ND鹘dCZ>ŴюEQ²(0Kd 9M#ӂ`Δ[A]f2Vt<'CkY̥r +tCkN +&Y"]R9q|{୕v;@Cp~ x Sۏ3G% kibɢc5.L_/[S)QPm-(8>W ,ӷ ?E/J@R{g~Dqs{{g\(pݪJ[1vAv`>V]sWjU:t>՟?z/=8A:8P FTo5JKK`?ǟrdɥ涜}̂>`ȵ:cRlj+ՔQMIR=-U#՚g;V9f@mɃ% ғ5֙_'+=>Vz23.] l6|O^5c+Ob4_8[pz|p,G3 -4G 6l0|zTSKF9V޹vwN-x?N/s.EXgO8Kќ-{Lh(B븗ZKy\!+rCș奀σqxumszK5%38 ^抔2gE8v{Ҝև1_Ni}wCU ~ Fh u~@z= WS߼Bbl? ’sks"½_DIb-x  )FՀc9ȏN_ {Q[#fgi/` 6'ۭK;D sx0 j62qnӱ/E6N$++ ʀdĥkfr9jKh,)1ڱoGBvK[Zw Qbx>! 
10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/etc/webhook-cert/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Feb 23 09:09:03 crc kubenswrapper[4834]: > logger="UnhandledError" Feb 23 09:09:03 crc kubenswrapper[4834]: E0223 09:09:03.892221 4834 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 23 09:09:03 crc kubenswrapper[4834]: container &Container{Name:approver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Feb 23 09:09:03 crc kubenswrapper[4834]: if [[ -f "/env/_master" ]]; then Feb 23 09:09:03 crc kubenswrapper[4834]: set -o allexport Feb 23 09:09:03 crc kubenswrapper[4834]: source "/env/_master" Feb 23
09:09:03 crc kubenswrapper[4834]: set +o allexport Feb 23 09:09:03 crc kubenswrapper[4834]: fi Feb 23 09:09:03 crc kubenswrapper[4834]: Feb 23 09:09:03 crc kubenswrapper[4834]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start approver" Feb 23 09:09:03 crc kubenswrapper[4834]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Feb 23 09:09:03 crc kubenswrapper[4834]: --disable-webhook \ Feb 23 09:09:03 crc kubenswrapper[4834]: --csr-acceptance-conditions="/var/run/ovnkube-identity-config/additional-cert-acceptance-cond.json" \ Feb 23 09:09:03 crc kubenswrapper[4834]: --loglevel="${LOGLEVEL}" Feb 23 09:09:03 crc kubenswrapper[4834]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:4,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Feb 23 09:09:03 crc kubenswrapper[4834]: > logger="UnhandledError" Feb 23 09:09:03 crc kubenswrapper[4834]: E0223 09:09:03.893467 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"webhook\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"approver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-network-node-identity/network-node-identity-vrzqb" podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" Feb 23 09:09:03 crc kubenswrapper[4834]: E0223 09:09:03.895018 4834 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:iptables-alerter,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/iptables-alerter/iptables-alerter.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONTAINER_RUNTIME_ENDPOINT,Value:unix:///run/crio/crio.sock,ValueFrom:nil,},EnvVar{Name:ALERTER_POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:iptables-alerter-script,ReadOnly:false,MountPath:/iptables-alerter,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-slash,ReadOnly:true,MountPath:/host,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rczfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod iptables-alerter-4ln5h_openshift-network-operator(d75a4c96-2883-4a0b-bab2-0fab2b6c0b49): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Feb 23 09:09:03 crc kubenswrapper[4834]: E0223 09:09:03.896269 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"iptables-alerter\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/iptables-alerter-4ln5h" podUID="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" Feb 23 09:09:03 crc kubenswrapper[4834]: I0223 09:09:03.948142 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"8c02bb61a788b034553884a4d741a87460108fe6349e512687dc1c5db2c6b22e"} Feb 23 09:09:03 crc kubenswrapper[4834]: E0223 09:09:03.949567 4834 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:iptables-alerter,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/iptables-alerter/iptables-alerter.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONTAINER_RUNTIME_ENDPOINT,Value:unix:///run/crio/crio.sock,ValueFrom:nil,},EnvVar{Name:ALERTER_POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m 
DecimalSI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:iptables-alerter-script,ReadOnly:false,MountPath:/iptables-alerter,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-slash,ReadOnly:true,MountPath:/host,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rczfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod iptables-alerter-4ln5h_openshift-network-operator(d75a4c96-2883-4a0b-bab2-0fab2b6c0b49): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Feb 23 09:09:03 crc kubenswrapper[4834]: I0223 09:09:03.950588 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"842188667b57666feae1345494d5c953c76f6ff1937d01087cd866f9fe646a56"} Feb 23 09:09:03 crc kubenswrapper[4834]: E0223 09:09:03.951069 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"iptables-alerter\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/iptables-alerter-4ln5h" podUID="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" Feb 23 09:09:03 crc kubenswrapper[4834]: E0223 09:09:03.952873 4834 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 23 09:09:03 crc kubenswrapper[4834]: container &Container{Name:webhook,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Feb 23 09:09:03 crc kubenswrapper[4834]: if [[ -f "/env/_master" ]]; then Feb 23 09:09:03 crc kubenswrapper[4834]: set -o allexport Feb 23 09:09:03 crc kubenswrapper[4834]: source "/env/_master" Feb 23 09:09:03 crc kubenswrapper[4834]: set +o allexport Feb 23 09:09:03 crc kubenswrapper[4834]: fi Feb 23 09:09:03 crc kubenswrapper[4834]: # OVN-K will try to remove hybrid overlay node annotations even when the hybrid overlay is not enabled. 
Feb 23 09:09:03 crc kubenswrapper[4834]: # https://github.com/ovn-org/ovn-kubernetes/blob/ac6820df0b338a246f10f412cd5ec903bd234694/go-controller/pkg/ovn/master.go#L791 Feb 23 09:09:03 crc kubenswrapper[4834]: ho_enable="--enable-hybrid-overlay" Feb 23 09:09:03 crc kubenswrapper[4834]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start webhook" Feb 23 09:09:03 crc kubenswrapper[4834]: # extra-allowed-user: service account `ovn-kubernetes-control-plane` Feb 23 09:09:03 crc kubenswrapper[4834]: # sets pod annotations in multi-homing layer3 network controller (cluster-manager) Feb 23 09:09:03 crc kubenswrapper[4834]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Feb 23 09:09:03 crc kubenswrapper[4834]: --webhook-cert-dir="/etc/webhook-cert" \ Feb 23 09:09:03 crc kubenswrapper[4834]: --webhook-host=127.0.0.1 \ Feb 23 09:09:03 crc kubenswrapper[4834]: --webhook-port=9743 \ Feb 23 09:09:03 crc kubenswrapper[4834]: ${ho_enable} \ Feb 23 09:09:03 crc kubenswrapper[4834]: --enable-interconnect \ Feb 23 09:09:03 crc kubenswrapper[4834]: --disable-approver \ Feb 23 09:09:03 crc kubenswrapper[4834]: --extra-allowed-user="system:serviceaccount:openshift-ovn-kubernetes:ovn-kubernetes-control-plane" \ Feb 23 09:09:03 crc kubenswrapper[4834]: --wait-for-kubernetes-api=200s \ Feb 23 09:09:03 crc kubenswrapper[4834]: --pod-admission-conditions="/var/run/ovnkube-identity-config/additional-pod-admission-cond.json" \ Feb 23 09:09:03 crc kubenswrapper[4834]: --loglevel="${LOGLEVEL}" Feb 23 09:09:03 crc kubenswrapper[4834]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:2,ValueFrom:nil,},EnvVar{Name:KUBERNETES_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/etc/webhook-cert/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct 
envvars Feb 23 09:09:03 crc kubenswrapper[4834]: > logger="UnhandledError" Feb 23 09:09:03 crc kubenswrapper[4834]: E0223 09:09:03.956637 4834 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 23 09:09:03 crc kubenswrapper[4834]: container &Container{Name:approver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Feb 23 09:09:03 crc kubenswrapper[4834]: if [[ -f "/env/_master" ]]; then Feb 23 09:09:03 crc kubenswrapper[4834]: set -o allexport Feb 23 09:09:03 crc kubenswrapper[4834]: source "/env/_master" Feb 23 09:09:03 crc kubenswrapper[4834]: set +o allexport Feb 23 09:09:03 crc kubenswrapper[4834]: fi Feb 23 09:09:03 crc kubenswrapper[4834]: Feb 23 09:09:03 crc kubenswrapper[4834]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start approver" Feb 23 09:09:03 crc kubenswrapper[4834]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Feb 23 09:09:03 crc kubenswrapper[4834]: --disable-webhook \ Feb 23 09:09:03 crc kubenswrapper[4834]: --csr-acceptance-conditions="/var/run/ovnkube-identity-config/additional-cert-acceptance-cond.json" \ Feb 23 09:09:03 crc kubenswrapper[4834]: --loglevel="${LOGLEVEL}" Feb 23 09:09:03 crc kubenswrapper[4834]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:4,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Feb 23 09:09:03 crc kubenswrapper[4834]: > logger="UnhandledError" Feb 23 09:09:03 crc kubenswrapper[4834]: E0223 09:09:03.957839 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"webhook\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"approver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-network-node-identity/network-node-identity-vrzqb" 
podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" Feb 23 09:09:03 crc kubenswrapper[4834]: I0223 09:09:03.962157 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:03 crc kubenswrapper[4834]: I0223 09:09:03.972090 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:03 crc kubenswrapper[4834]: I0223 09:09:03.983780 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:03 crc kubenswrapper[4834]: I0223 09:09:03.991002 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:03 crc kubenswrapper[4834]: I0223 09:09:03.991046 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:03 crc kubenswrapper[4834]: I0223 09:09:03.991059 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:03 crc kubenswrapper[4834]: I0223 09:09:03.991080 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:03 crc kubenswrapper[4834]: I0223 09:09:03.991093 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:03Z","lastTransitionTime":"2026-02-23T09:09:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:03 crc kubenswrapper[4834]: I0223 09:09:03.995029 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.005987 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.015293 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.024054 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.034996 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.046339 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.056249 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.066510 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.078768 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.093808 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.093851 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.093867 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.093886 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.093895 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:04Z","lastTransitionTime":"2026-02-23T09:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.157517 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.170154 4834 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 23 09:09:04 crc kubenswrapper[4834]: container &Container{Name:network-operator,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,Command:[/bin/bash -c #!/bin/bash Feb 23 09:09:04 crc kubenswrapper[4834]: set -o allexport Feb 23 09:09:04 crc kubenswrapper[4834]: if [[ -f /etc/kubernetes/apiserver-url.env ]]; then Feb 23 09:09:04 crc kubenswrapper[4834]: source /etc/kubernetes/apiserver-url.env Feb 23 09:09:04 crc kubenswrapper[4834]: else Feb 23 09:09:04 crc kubenswrapper[4834]: echo "Error: /etc/kubernetes/apiserver-url.env is missing" Feb 23 09:09:04 crc kubenswrapper[4834]: exit 1 Feb 23 09:09:04 crc kubenswrapper[4834]: fi Feb 23 09:09:04 crc kubenswrapper[4834]: exec /usr/bin/cluster-network-operator start --listen=0.0.0.0:9104 Feb 23 09:09:04 crc kubenswrapper[4834]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:cno,HostPort:9104,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:RELEASE_VERSION,Value:4.18.1,ValueFrom:nil,},EnvVar{Name:KUBE_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b97554198294bf544fbc116c94a0a1fb2ec8a4de0e926bf9d9e320135f0bee6f,ValueFrom:nil,},EnvVar{Name:KUBE_RBAC_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,ValueFrom:nil,},EnvVar{Name:MULTUS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,ValueFrom:nil,},EnvVar{Name:MULTUS_ADMISSION_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317,ValueFrom:nil,},EnvVar{Name:CNI_PLUGINS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc,ValueFrom:nil,},EnvVar{Name:BOND_CNI_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78,ValueFrom:nil,},EnvVar{Name:WHEREABOUTS_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4,ValueFrom:nil,},EnvVar{Name:ROUTE_OVERRRIDE_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa,ValueFrom:nil,},EnvVar{Name:MULTUS_NETWORKPOLICY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:23f833d3738d68706eb2f2868bd76bd71cee016cffa6faf5f045a60cc8c6eddd,ValueFrom:nil,},EnvVar{Name:OVN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,ValueFrom:nil,},EnvVar{Name:OVN_NB_RAFT_ELECTION_TIMER,Value:10,ValueFrom:nil,},EnvVar{Name:OVN_SB_RAFT_ELECTION_TIMER,Value:16,ValueFrom:nil,},EnvVar{Name:OVN_NORTHD_PROBE_INTERVAL,Value:10000,ValueFrom:nil,},EnvVar{Name:OVN_CONTROLLER_INACTIVITY_PROBE,Value:180000,ValueFrom:nil,},EnvVar{Name:OVN_NB_INACTIVITY_PROBE,Value:60000,ValueFrom:nil,},EnvVar{Name:EGRESS_ROUTER_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,ValueFrom:nil,},EnvVar{Name:NETWORK_METRICS_DAEMON_
IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_SOURCE_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_TARGET_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_OPERATOR_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:CLOUD_NETWORK_CONFIG_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8048f1cb0be521f09749c0a489503cd56d85b68c6ca93380e082cfd693cd97a8,ValueFrom:nil,},EnvVar{Name:CLI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,ValueFrom:nil,},EnvVar{Name:FRR_K8S_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5dbf844e49bb46b78586930149e5e5f5dc121014c8afd10fe36f3651967cc256,ValueFrom:nil,},EnvVar{Name:NETWORKING_CONSOLE_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd,ValueFrom:nil,},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host-etc-kube,ReadOnly:true,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-tls,ReadOnly:false,MountPath:/var/run/secrets/serving-cert,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rdwmf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-operator-58b4c7f79c-55gtf_openshift-network-operator(37a5e44f-9a88-4405-be8a-b645485e7312): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Feb 23 09:09:04 crc kubenswrapper[4834]: > logger="UnhandledError" Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.171336 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"network-operator\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" podUID="37a5e44f-9a88-4405-be8a-b645485e7312" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.197080 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.197115 4834 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.197129 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.197146 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.197157 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:04Z","lastTransitionTime":"2026-02-23T09:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.267980 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.268097 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.268176 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:05.268154151 +0000 UTC m=+81.346468538 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.268218 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.268248 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.268276 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.268288 4834 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.268340 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:05.268325336 +0000 UTC m=+81.346639713 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.268253 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.268374 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.268512 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.268540 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.268557 4834 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.268652 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:05.268633114 +0000 UTC m=+81.346947501 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.268491 4834 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.268674 4834 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.268746 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:05.268730907 +0000 UTC m=+81.347045384 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.268798 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:05.268766438 +0000 UTC m=+81.347080845 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.299482 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.299547 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.299559 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.299582 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.299599 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:04Z","lastTransitionTime":"2026-02-23T09:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.382749 4834 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Feb 23 09:09:04 crc kubenswrapper[4834]: W0223 09:09:04.383134 4834 reflector.go:484] object-"openshift-network-node-identity"/"env-overrides": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"env-overrides": Unexpected watch close - watch lasted less than a second and no items received Feb 23 09:09:04 crc kubenswrapper[4834]: W0223 09:09:04.383174 4834 reflector.go:484] object-"openshift-network-operator"/"metrics-tls": watch of *v1.Secret ended with: very short watch: object-"openshift-network-operator"/"metrics-tls": Unexpected watch close - watch lasted less than a second and no items received Feb 23 09:09:04 crc kubenswrapper[4834]: W0223 09:09:04.383207 4834 reflector.go:484] object-"openshift-network-operator"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-operator"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Feb 23 09:09:04 crc kubenswrapper[4834]: W0223 09:09:04.383231 4834 reflector.go:484] object-"openshift-network-operator"/"iptables-alerter-script": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-operator"/"iptables-alerter-script": Unexpected watch close - watch lasted less than a second and no items received Feb 23 09:09:04 crc kubenswrapper[4834]: W0223 09:09:04.383259 4834 reflector.go:484] object-"openshift-network-operator"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-operator"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Feb 23 09:09:04 crc kubenswrapper[4834]: W0223 09:09:04.383281 4834 reflector.go:484] pkg/kubelet/config/apiserver.go:66: watch of *v1.Pod ended with: very short watch: pkg/kubelet/config/apiserver.go:66: Unexpected watch close - watch lasted less than a second and no items received Feb 23 09:09:04 crc kubenswrapper[4834]: W0223 09:09:04.383314 4834 reflector.go:484] object-"openshift-network-node-identity"/"openshift-service-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"openshift-service-ca.crt": Unexpected watch close - watch lasted less than a second and no items received Feb 23 09:09:04 crc kubenswrapper[4834]: W0223 09:09:04.383336 4834 reflector.go:484] object-"openshift-network-node-identity"/"network-node-identity-cert": watch of *v1.Secret ended with: very short watch: object-"openshift-network-node-identity"/"network-node-identity-cert": Unexpected watch close - watch lasted less than a second and no items received Feb 23 09:09:04 crc kubenswrapper[4834]: W0223 09:09:04.383359 4834 reflector.go:484] object-"openshift-network-node-identity"/"ovnkube-identity-cm": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"ovnkube-identity-cm": Unexpected watch close - watch lasted less than a second and no items received Feb 23 09:09:04 crc kubenswrapper[4834]: W0223 09:09:04.383379 4834 reflector.go:484] object-"openshift-network-node-identity"/"kube-root-ca.crt": watch of *v1.ConfigMap ended with: very short watch: object-"openshift-network-node-identity"/"kube-root-ca.crt": Unexpected watch close - watch lasted less than a second 
and no items received Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.401862 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.401948 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.401962 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.401986 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.402003 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:04Z","lastTransitionTime":"2026-02-23T09:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.504259 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.504323 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.504337 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.504359 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.504375 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:04Z","lastTransitionTime":"2026-02-23T09:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.580026 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 19:47:41.789627742 +0000 UTC Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.587864 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.588610 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.589707 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.590283 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.591237 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.591780 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.592367 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.593303 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.593987 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.594883 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.595497 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.596639 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.597366 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Feb 23 09:09:04 crc 
kubenswrapper[4834]: I0223 09:09:04.597628 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.598092 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.598977 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.599524 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.600500 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.600892 4834 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.601528 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.602479 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.602892 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.603792 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.604259 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.605234 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.605742 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.606393 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.606513 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.606591 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.606669 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.606545 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.606724 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:04Z","lastTransitionTime":"2026-02-23T09:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.607901 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.608381 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.609299 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.609856 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.611065 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.611114 4834 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.611317 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.613695 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.615279 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.615873 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.617966 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.618852 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.620092 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.620936 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.622361 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.623171 4834 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.624013 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.625628 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.626508 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.627942 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.628715 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.629907 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.630561 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.631904 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.632380 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.633287 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.633762 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.634789 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.635364 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.636287 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.636545 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.647521 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.658006 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.709717 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.709784 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.709802 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.709828 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.709850 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:04Z","lastTransitionTime":"2026-02-23T09:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.812625 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.812674 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.812687 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.812708 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.812721 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:04Z","lastTransitionTime":"2026-02-23T09:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.915729 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.915770 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.915801 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.915818 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.915831 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:04Z","lastTransitionTime":"2026-02-23T09:09:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.954479 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"c3f96343a547d8bce66fa0e5b88fa3cb8e189a23478eeab6836ff69ad53977e7"} Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.957937 4834 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 23 09:09:04 crc kubenswrapper[4834]: container &Container{Name:network-operator,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,Command:[/bin/bash -c #!/bin/bash Feb 23 09:09:04 crc kubenswrapper[4834]: set -o allexport Feb 23 09:09:04 crc kubenswrapper[4834]: if [[ -f /etc/kubernetes/apiserver-url.env ]]; then Feb 23 09:09:04 crc kubenswrapper[4834]: source /etc/kubernetes/apiserver-url.env Feb 23 09:09:04 crc kubenswrapper[4834]: else Feb 23 09:09:04 crc kubenswrapper[4834]: echo "Error: /etc/kubernetes/apiserver-url.env is missing" Feb 23 09:09:04 crc kubenswrapper[4834]: exit 1 Feb 23 09:09:04 crc kubenswrapper[4834]: fi Feb 23 09:09:04 crc kubenswrapper[4834]: exec /usr/bin/cluster-network-operator start --listen=0.0.0.0:9104 Feb 23 09:09:04 crc kubenswrapper[4834]: 
],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:cno,HostPort:9104,ContainerPort:9104,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:RELEASE_VERSION,Value:4.18.1,ValueFrom:nil,},EnvVar{Name:KUBE_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b97554198294bf544fbc116c94a0a1fb2ec8a4de0e926bf9d9e320135f0bee6f,ValueFrom:nil,},EnvVar{Name:KUBE_RBAC_PROXY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09,ValueFrom:nil,},EnvVar{Name:MULTUS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26,ValueFrom:nil,},EnvVar{Name:MULTUS_ADMISSION_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317,ValueFrom:nil,},EnvVar{Name:CNI_PLUGINS_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc,ValueFrom:nil,},EnvVar{Name:BOND_CNI_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78,ValueFrom:nil,},EnvVar{Name:WHEREABOUTS_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4,ValueFrom:nil,},EnvVar{Name:ROUTE_OVERRRIDE_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa,ValueFrom:nil,},EnvVar{Name:MULTUS_NETWORKPOLICY_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:23f833d3738d68706eb2f2868bd76bd71cee016cffa6faf5f045a60cc8c6eddd,ValueFrom:nil,},EnvVar{Name:OVN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,ValueFrom:nil,},EnvVar{Name:OVN_NB_RAFT_ELECTION_TIMER,Value:10,ValueFrom:nil,},EnvVar{Name:OVN_SB_RAFT_ELECTION_TIMER,Value:16,ValueFrom:nil,},EnvVar{Name:OVN_NORTHD_PROBE_INTERVAL,Value:10000,ValueFrom:nil,},EnvVar{Name:OVN_CONTROLLER_INACTIVITY_PROBE,Value:180000,ValueFrom:nil,},EnvVar{Name:OVN_NB_INACTIVITY_PROBE,Value:60000,ValueFrom:nil,},EnvVar{Name:EGRESS_ROUTER_CNI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c,ValueFrom:nil,},EnvVar{Name:NETWORK_METRICS_DAEMON_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_SOURCE_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_CHECK_TARGET_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:NETWORK_OPERATOR_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b,ValueFrom:nil,},EnvVar{Name:CLOUD_NETWORK_CONFIG_CONTROLLER_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8048f1cb0be521f09749c0a489503cd56d85b68c6ca93380e082cfd693cd97a8,ValueFrom:nil,},EnvVar{Name:CLI_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,ValueFrom:nil,},EnvVar{Name:FRR_K8S_IMAGE,Value:quay.io
/openshift-release-dev/ocp-v4.0-art-dev@sha256:5dbf844e49bb46b78586930149e5e5f5dc121014c8afd10fe36f3651967cc256,ValueFrom:nil,},EnvVar{Name:NETWORKING_CONSOLE_PLUGIN_IMAGE,Value:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd,ValueFrom:nil,},EnvVar{Name:POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:host-etc-kube,ReadOnly:true,MountPath:/etc/kubernetes,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-tls,ReadOnly:false,MountPath:/var/run/secrets/serving-cert,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rdwmf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:nil,Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-operator-58b4c7f79c-55gtf_openshift-network-operator(37a5e44f-9a88-4405-be8a-b645485e7312): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Feb 23 09:09:04 crc kubenswrapper[4834]: > logger="UnhandledError" Feb 23 09:09:04 crc kubenswrapper[4834]: E0223 09:09:04.959074 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"network-operator\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" podUID="37a5e44f-9a88-4405-be8a-b645485e7312" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.967992 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.981513 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:04 crc kubenswrapper[4834]: I0223 09:09:04.994607 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:04Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.005818 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.016136 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.018300 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.018471 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.018604 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.018702 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.018792 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:05Z","lastTransitionTime":"2026-02-23T09:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.026106 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.121948 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.121987 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.121999 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.122018 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.122032 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:05Z","lastTransitionTime":"2026-02-23T09:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.226725 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.226802 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.226821 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.226850 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.226870 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:05Z","lastTransitionTime":"2026-02-23T09:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.233254 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.278684 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.278784 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.278823 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.278851 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.278886 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod 
\"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.278986 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:07.27894012 +0000 UTC m=+83.357254507 (durationBeforeRetry 2s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.279018 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.279064 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.279010 4834 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.279087 4834 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.279088 4834 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.279159 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:07.279133896 +0000 UTC m=+83.357448293 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.279077 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.279189 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.279197 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:07.279170557 +0000 UTC m=+83.357485134 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.279204 4834 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.279223 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:07.279210338 +0000 UTC m=+83.357524975 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.279256 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:07.279247029 +0000 UTC m=+83.357561436 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.330047 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.330099 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.330108 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.330127 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.330137 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:05Z","lastTransitionTime":"2026-02-23T09:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.403997 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.433588 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.433732 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.433758 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.433794 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.433814 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:05Z","lastTransitionTime":"2026-02-23T09:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.489996 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.536103 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.536857 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.536951 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.536976 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.536990 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:05Z","lastTransitionTime":"2026-02-23T09:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.547317 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.580453 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 23:24:09.798591921 +0000 UTC Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.584841 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.584890 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.585057 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.585106 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.585252 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:05 crc kubenswrapper[4834]: E0223 09:09:05.586113 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.591431 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.639438 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.639472 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.639481 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.639495 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.639504 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:05Z","lastTransitionTime":"2026-02-23T09:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.640766 4834 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.659160 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.705828 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.742295 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.742346 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.742355 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.742372 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.742384 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:05Z","lastTransitionTime":"2026-02-23T09:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.774891 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.845799 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.845853 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.845865 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.845884 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.845895 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:05Z","lastTransitionTime":"2026-02-23T09:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.930350 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.950379 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.950498 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.950517 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.950545 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:05 crc kubenswrapper[4834]: I0223 09:09:05.950563 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:05Z","lastTransitionTime":"2026-02-23T09:09:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.053548 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.053611 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.053627 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.053654 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.053671 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:06Z","lastTransitionTime":"2026-02-23T09:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.155836 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.155920 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.155933 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.155967 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.155979 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:06Z","lastTransitionTime":"2026-02-23T09:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.258762 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.258827 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.258845 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.258870 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.258885 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:06Z","lastTransitionTime":"2026-02-23T09:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.338433 4834 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.361632 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.361681 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.361693 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.361714 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.361728 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:06Z","lastTransitionTime":"2026-02-23T09:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.464545 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.464596 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.464607 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.464623 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.464632 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:06Z","lastTransitionTime":"2026-02-23T09:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.567936 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.567988 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.568000 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.568017 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.568032 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:06Z","lastTransitionTime":"2026-02-23T09:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.581525 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-29 03:54:02.076616661 +0000 UTC Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.671944 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.672009 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.672029 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.672056 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.672080 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:06Z","lastTransitionTime":"2026-02-23T09:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.775813 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.775883 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.775900 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.775930 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.775950 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:06Z","lastTransitionTime":"2026-02-23T09:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.879148 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.879202 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.879214 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.879236 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.879250 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:06Z","lastTransitionTime":"2026-02-23T09:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.982916 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.982982 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.982999 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.983023 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:06 crc kubenswrapper[4834]: I0223 09:09:06.983049 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:06Z","lastTransitionTime":"2026-02-23T09:09:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.086792 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.086872 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.086895 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.086926 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.086948 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:07Z","lastTransitionTime":"2026-02-23T09:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.190553 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.190624 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.190639 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.190663 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.190683 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:07Z","lastTransitionTime":"2026-02-23T09:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.293680 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.293772 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.293793 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.293822 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.293842 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:07Z","lastTransitionTime":"2026-02-23T09:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.299991 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.300112 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.300162 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:11.300132128 +0000 UTC m=+87.378446525 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.300236 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.300316 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.300249 4834 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.300370 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.300432 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-02-23 09:09:11.300390015 +0000 UTC m=+87.378704592 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.300330 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.300540 4834 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.300591 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.300628 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.300646 4834 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.300540 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.300678 4834 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.300655 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:11.300630993 +0000 UTC m=+87.378945550 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.300838 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:11.300764727 +0000 UTC m=+87.379079114 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.300874 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:11.300865329 +0000 UTC m=+87.379179716 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.396862 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.396899 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.396908 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.396924 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.396935 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:07Z","lastTransitionTime":"2026-02-23T09:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.501340 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.501494 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.501522 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.501553 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.501576 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:07Z","lastTransitionTime":"2026-02-23T09:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.582015 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 04:35:54.265654595 +0000 UTC Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.584483 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.584508 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.584483 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.584652 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.584799 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:07 crc kubenswrapper[4834]: E0223 09:09:07.584887 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.605053 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.605099 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.605110 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.605132 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.605145 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:07Z","lastTransitionTime":"2026-02-23T09:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.708818 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.709018 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.709060 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.709092 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.709119 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:07Z","lastTransitionTime":"2026-02-23T09:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.812410 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.813228 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.813276 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.813301 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.813316 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:07Z","lastTransitionTime":"2026-02-23T09:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.917236 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.917292 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.917305 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.917323 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:07 crc kubenswrapper[4834]: I0223 09:09:07.917335 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:07Z","lastTransitionTime":"2026-02-23T09:09:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.024350 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.024416 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.024427 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.024442 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.024454 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:08Z","lastTransitionTime":"2026-02-23T09:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.128521 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.128603 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.128636 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.128667 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.128689 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:08Z","lastTransitionTime":"2026-02-23T09:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.232327 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.232387 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.232425 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.232449 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.232464 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:08Z","lastTransitionTime":"2026-02-23T09:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.335608 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.335666 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.335680 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.335703 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.335720 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:08Z","lastTransitionTime":"2026-02-23T09:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.438378 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.438476 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.438497 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.438525 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.438545 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:08Z","lastTransitionTime":"2026-02-23T09:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.541648 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.542264 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.542437 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.542570 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.542682 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:08Z","lastTransitionTime":"2026-02-23T09:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.582517 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 13:02:26.005880121 +0000 UTC Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.607737 4834 scope.go:117] "RemoveContainer" containerID="081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.608492 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 23 09:09:08 crc kubenswrapper[4834]: E0223 09:09:08.608833 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.646609 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.646669 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.646691 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.646721 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.646744 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:08Z","lastTransitionTime":"2026-02-23T09:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.750368 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.750489 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.750519 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.750554 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.750576 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:08Z","lastTransitionTime":"2026-02-23T09:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.853923 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.853992 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.854010 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.854035 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.854054 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:08Z","lastTransitionTime":"2026-02-23T09:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.957673 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.957765 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.957789 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.957823 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.957844 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:08Z","lastTransitionTime":"2026-02-23T09:09:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:08 crc kubenswrapper[4834]: I0223 09:09:08.966910 4834 scope.go:117] "RemoveContainer" containerID="081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d" Feb 23 09:09:08 crc kubenswrapper[4834]: E0223 09:09:08.967118 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.061381 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.061467 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.061484 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.061512 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.061532 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:09Z","lastTransitionTime":"2026-02-23T09:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.165175 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.165258 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.165283 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.165320 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.165343 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:09Z","lastTransitionTime":"2026-02-23T09:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.268686 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.268749 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.268760 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.268779 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.268792 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:09Z","lastTransitionTime":"2026-02-23T09:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.373373 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.373451 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.373465 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.373488 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.373501 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:09Z","lastTransitionTime":"2026-02-23T09:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.477174 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.477258 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.477275 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.477300 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.477317 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:09Z","lastTransitionTime":"2026-02-23T09:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.581147 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.581218 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.581237 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.581299 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.581320 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:09Z","lastTransitionTime":"2026-02-23T09:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.583668 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 17:56:59.03467648 +0000 UTC Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.585006 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.585079 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.585113 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:09 crc kubenswrapper[4834]: E0223 09:09:09.585273 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:09 crc kubenswrapper[4834]: E0223 09:09:09.585437 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:09 crc kubenswrapper[4834]: E0223 09:09:09.585559 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.683730 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.683809 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.683829 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.683869 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.683891 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:09Z","lastTransitionTime":"2026-02-23T09:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.787259 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.787324 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.787343 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.787372 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.787391 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:09Z","lastTransitionTime":"2026-02-23T09:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.890528 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.890593 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.890606 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.890631 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.890654 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:09Z","lastTransitionTime":"2026-02-23T09:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.993749 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.993797 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.993808 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.993826 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:09 crc kubenswrapper[4834]: I0223 09:09:09.993838 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:09Z","lastTransitionTime":"2026-02-23T09:09:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.096903 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.096963 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.096979 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.097003 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.097020 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:10Z","lastTransitionTime":"2026-02-23T09:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.200159 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.200248 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.200268 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.200289 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.200303 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:10Z","lastTransitionTime":"2026-02-23T09:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.303576 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.303632 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.303648 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.303669 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.303685 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:10Z","lastTransitionTime":"2026-02-23T09:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.406470 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.406539 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.406558 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.406589 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.406610 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:10Z","lastTransitionTime":"2026-02-23T09:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.509829 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.510110 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.510195 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.510288 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.510366 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:10Z","lastTransitionTime":"2026-02-23T09:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.583917 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 15:44:33.976369749 +0000 UTC Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.613202 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.613272 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.613296 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.613326 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.613352 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:10Z","lastTransitionTime":"2026-02-23T09:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.716884 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.716947 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.716963 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.716984 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.716998 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:10Z","lastTransitionTime":"2026-02-23T09:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.782477 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.782558 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.782578 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.782607 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.782629 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:10Z","lastTransitionTime":"2026-02-23T09:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:10 crc kubenswrapper[4834]: E0223 09:09:10.801727 4834 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a013d5d2-c870-42c6-8fef-9285815a5771\\\",\\\"systemUUID\\\":\\\"3bc3bfda-fc9c-4fdf-b927-7c29d17cae8a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.808781 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.808853 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.808867 4834 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.808888 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.808904 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:10Z","lastTransitionTime":"2026-02-23T09:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:10 crc kubenswrapper[4834]: E0223 09:09:10.824741 4834 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a013d5d2-c870-42c6-8fef-9285815a5771\\\",\\\"systemUUID\\\":\\\"3bc3bfda-fc9c-4fdf-b927-7c29d17cae8a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.830019 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.830080 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.830095 4834 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.830119 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.830140 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:10Z","lastTransitionTime":"2026-02-23T09:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:10 crc kubenswrapper[4834]: E0223 09:09:10.842710 4834 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a013d5d2-c870-42c6-8fef-9285815a5771\\\",\\\"systemUUID\\\":\\\"3bc3bfda-fc9c-4fdf-b927-7c29d17cae8a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.846965 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.847005 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.847015 4834 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.847039 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.847052 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:10Z","lastTransitionTime":"2026-02-23T09:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:10 crc kubenswrapper[4834]: E0223 09:09:10.860368 4834 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a013d5d2-c870-42c6-8fef-9285815a5771\\\",\\\"systemUUID\\\":\\\"3bc3bfda-fc9c-4fdf-b927-7c29d17cae8a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.865449 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.865505 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.865521 4834 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.865544 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.865560 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:10Z","lastTransitionTime":"2026-02-23T09:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:10 crc kubenswrapper[4834]: E0223 09:09:10.875887 4834 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:10Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a013d5d2-c870-42c6-8fef-9285815a5771\\\",\\\"systemUUID\\\":\\\"3bc3bfda-fc9c-4fdf-b927-7c29d17cae8a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:10 crc kubenswrapper[4834]: E0223 09:09:10.876006 4834 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.878084 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.878118 4834 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.878128 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.878144 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.878155 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:10Z","lastTransitionTime":"2026-02-23T09:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.980624 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.980879 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.980922 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.980957 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:10 crc kubenswrapper[4834]: I0223 09:09:10.980981 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:10Z","lastTransitionTime":"2026-02-23T09:09:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.084355 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.084498 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.084530 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.084567 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.084592 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:11Z","lastTransitionTime":"2026-02-23T09:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.187756 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.187860 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.187896 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.187927 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.187948 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:11Z","lastTransitionTime":"2026-02-23T09:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.291787 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.291868 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.291885 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.291917 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.291937 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:11Z","lastTransitionTime":"2026-02-23T09:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.344248 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.344389 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.344570 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-02-23 09:09:19.34451465 +0000 UTC m=+95.422829067 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.344601 4834 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.344686 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:19.344658794 +0000 UTC m=+95.422973401 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.344716 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.344754 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.344786 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.344904 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.344926 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.344944 4834 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.345000 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.345004 4834 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.345034 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.345057 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:19.345042335 +0000 UTC m=+95.423356732 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.345069 4834 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.345125 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:19.345095856 +0000 UTC m=+95.423410433 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.345161 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:19.345144117 +0000 UTC m=+95.423458534 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.395350 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.395525 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.395551 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.395587 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.395612 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:11Z","lastTransitionTime":"2026-02-23T09:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.498768 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.498824 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.498836 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.498856 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.498872 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:11Z","lastTransitionTime":"2026-02-23T09:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.584119 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 22:07:16.750592449 +0000 UTC Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.584386 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.584486 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.584448 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.584637 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.584809 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:11 crc kubenswrapper[4834]: E0223 09:09:11.584888 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.602061 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.602131 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.602155 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.602190 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.602217 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:11Z","lastTransitionTime":"2026-02-23T09:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.706324 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.706440 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.706457 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.706478 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.706492 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:11Z","lastTransitionTime":"2026-02-23T09:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.809528 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.809578 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.809589 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.809607 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.809619 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:11Z","lastTransitionTime":"2026-02-23T09:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.913384 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.913460 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.913472 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.913490 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:11 crc kubenswrapper[4834]: I0223 09:09:11.913502 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:11Z","lastTransitionTime":"2026-02-23T09:09:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.016915 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.016991 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.017011 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.017040 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.017059 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:12Z","lastTransitionTime":"2026-02-23T09:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.120753 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.120839 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.120860 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.120886 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.120903 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:12Z","lastTransitionTime":"2026-02-23T09:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.225190 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.225300 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.225327 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.225371 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.225452 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:12Z","lastTransitionTime":"2026-02-23T09:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.328800 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.328864 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.328874 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.328894 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.328906 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:12Z","lastTransitionTime":"2026-02-23T09:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.432510 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.432594 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.432611 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.432657 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.432677 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:12Z","lastTransitionTime":"2026-02-23T09:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.535280 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.535349 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.535370 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.535428 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.535451 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:12Z","lastTransitionTime":"2026-02-23T09:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.584355 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 04:06:00.112083742 +0000 UTC Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.611091 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.638558 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.638620 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.638633 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.638655 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.638671 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:12Z","lastTransitionTime":"2026-02-23T09:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.741754 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.741839 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.741860 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.741889 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.741911 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:12Z","lastTransitionTime":"2026-02-23T09:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.846003 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.846051 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.846063 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.846081 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.846094 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:12Z","lastTransitionTime":"2026-02-23T09:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.950125 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.950204 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.950222 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.950265 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:12 crc kubenswrapper[4834]: I0223 09:09:12.950287 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:12Z","lastTransitionTime":"2026-02-23T09:09:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.053740 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.053801 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.053813 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.053836 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.053850 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:13Z","lastTransitionTime":"2026-02-23T09:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.156563 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.156617 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.156628 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.156649 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.156661 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:13Z","lastTransitionTime":"2026-02-23T09:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.259805 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.259865 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.259879 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.259901 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.259913 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:13Z","lastTransitionTime":"2026-02-23T09:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.364368 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.364487 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.364509 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.364537 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.364557 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:13Z","lastTransitionTime":"2026-02-23T09:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.467064 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.467134 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.467146 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.467164 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.467177 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:13Z","lastTransitionTime":"2026-02-23T09:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.569832 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.569885 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.569897 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.569916 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.569929 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:13Z","lastTransitionTime":"2026-02-23T09:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.584558 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 06:46:02.973020486 +0000 UTC Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.584725 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.584763 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.584762 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:13 crc kubenswrapper[4834]: E0223 09:09:13.585044 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:13 crc kubenswrapper[4834]: E0223 09:09:13.585212 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:13 crc kubenswrapper[4834]: E0223 09:09:13.585452 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.597349 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.673777 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.673837 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.673849 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.673875 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.673889 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:13Z","lastTransitionTime":"2026-02-23T09:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.777080 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.777126 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.777137 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.777156 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.777170 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:13Z","lastTransitionTime":"2026-02-23T09:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.881106 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.881158 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.881167 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.881185 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.881198 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:13Z","lastTransitionTime":"2026-02-23T09:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.984175 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.984220 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.984231 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.984250 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:13 crc kubenswrapper[4834]: I0223 09:09:13.984262 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:13Z","lastTransitionTime":"2026-02-23T09:09:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.087497 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.087573 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.087586 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.087613 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.087631 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:14Z","lastTransitionTime":"2026-02-23T09:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.190794 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.190851 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.190865 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.190885 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.190899 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:14Z","lastTransitionTime":"2026-02-23T09:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.294900 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.294960 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.294979 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.295007 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.295032 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:14Z","lastTransitionTime":"2026-02-23T09:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.398177 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.398262 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.398285 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.398312 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.398333 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:14Z","lastTransitionTime":"2026-02-23T09:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.501824 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.501911 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.501942 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.501978 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.502009 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:14Z","lastTransitionTime":"2026-02-23T09:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.584920 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 02:28:43.382255547 +0000 UTC Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.605169 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.605240 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.605262 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.605296 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.605321 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:14Z","lastTransitionTime":"2026-02-23T09:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.619163 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c5360b0-bcfe-49c7-99dd-87f4e16a1936\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad16faa9445f6f43d8e3f0fda566b1fde83ccbf5a60419cb21f48495cc29b9f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02ab00dc78b9f4a01120f4d13159010cd
9e224980671aac02cf91612673f557a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f610cfd6d780a3e8882ec6cf11406234d20578fbf77ce9c9bd307e7da80c4691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748315cd1ff895c9c49ed28ebc47f96eab7f0bfa090ab092dddfcfcffbc4d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://616a1ea1355329722a38418fc7d3b3f6be94b6d0a505aba61d85c755873e5d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"image\\\":\\\"quay.io/o
penshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.641185 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:04Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.660165 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.678147 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.699897 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.708361 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.708432 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.708451 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.708478 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.708497 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:14Z","lastTransitionTime":"2026-02-23T09:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.717504 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f86d8e6-b3d5-402a-9f9b-568ac673d63c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe1ece38b380cdd99d8c323c271e3753fdacdbbfb65f9ea0f7d46a4b99443ea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://828b449e3bfa815231a28f30a1f8d4360fc21abd57f0f5fbb2297c41a7116189\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06c52566aed1d53aa400ebba7eb5642d90291f5e0a712fe950ba434a8bd2c342\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-23T09:08:54Z\\\",\\\"message\\\":\\\"le observer\\\\nW0223 09:08:54.279935 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0223 09:08:54.280187 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0223 09:08:54.281744 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1246126551/tls.crt::/tmp/serving-cert-1246126551/tls.key\\\\\\\"\\\\nI0223 09:08:54.648156 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0223 09:08:54.651138 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0223 09:08:54.651159 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0223 09:08:54.651179 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0223 09:08:54.651185 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0223 09:08:54.656437 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0223 09:08:54.656475 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0223 09:08:54.656536 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656560 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656581 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0223 09:08:54.656602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0223 09:08:54.656623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0223 09:08:54.656642 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0223 09:08:54.657691 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-23T09:08:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c99b854342f23f595ebbf00de97b11b8a4bbd92e1bbcfcb876dd8d8139f84b15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.739803 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.749890 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.757872 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a44d4f04-0f7d-4c44-9dc0-1ecd80a10388\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f16a693a9ddada9fa2b8ef889011d4aa81ead7c66bc2245ecf1243371bd5e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc3
7959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.811944 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.811994 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.812012 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.812077 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.812098 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:14Z","lastTransitionTime":"2026-02-23T09:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.915711 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.916096 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.916349 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.916626 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:14 crc kubenswrapper[4834]: I0223 09:09:14.916840 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:14Z","lastTransitionTime":"2026-02-23T09:09:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.020395 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.020512 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.020535 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.020570 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.020589 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:15Z","lastTransitionTime":"2026-02-23T09:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.123591 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.123669 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.123711 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.123750 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.123777 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:15Z","lastTransitionTime":"2026-02-23T09:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.226990 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.227055 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.227074 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.227101 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.227136 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:15Z","lastTransitionTime":"2026-02-23T09:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.329967 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.330086 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.330103 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.330636 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.330697 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:15Z","lastTransitionTime":"2026-02-23T09:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.433724 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.433771 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.433781 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.433799 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.433812 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:15Z","lastTransitionTime":"2026-02-23T09:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.536569 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.536639 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.536657 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.536684 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.536702 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:15Z","lastTransitionTime":"2026-02-23T09:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.585089 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 10:26:18.872195064 +0000 UTC Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.585204 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.585249 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.585272 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:15 crc kubenswrapper[4834]: E0223 09:09:15.585434 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:15 crc kubenswrapper[4834]: E0223 09:09:15.585526 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:15 crc kubenswrapper[4834]: E0223 09:09:15.585623 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.639599 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.639638 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.639650 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.639670 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.639683 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:15Z","lastTransitionTime":"2026-02-23T09:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.743144 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.743186 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.743197 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.743214 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.743227 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:15Z","lastTransitionTime":"2026-02-23T09:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.845878 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.845936 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.845948 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.845964 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.845974 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:15Z","lastTransitionTime":"2026-02-23T09:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.949776 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.949844 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.949864 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.949891 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:15 crc kubenswrapper[4834]: I0223 09:09:15.949912 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:15Z","lastTransitionTime":"2026-02-23T09:09:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.053514 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.053596 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.053618 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.053644 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.053663 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:16Z","lastTransitionTime":"2026-02-23T09:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.157378 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.157459 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.157473 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.157495 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.157508 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:16Z","lastTransitionTime":"2026-02-23T09:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.260338 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.260381 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.260392 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.260430 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.260442 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:16Z","lastTransitionTime":"2026-02-23T09:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.363128 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.363188 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.363203 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.363225 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.363242 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:16Z","lastTransitionTime":"2026-02-23T09:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.465873 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.465921 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.465933 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.465952 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.465965 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:16Z","lastTransitionTime":"2026-02-23T09:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.569685 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.569734 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.569746 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.569766 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.569779 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:16Z","lastTransitionTime":"2026-02-23T09:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.585667 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 21:39:23.094701854 +0000 UTC Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.672956 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.673022 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.673039 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.673067 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.673085 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:16Z","lastTransitionTime":"2026-02-23T09:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.777219 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.777278 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.777289 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.777306 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.777319 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:16Z","lastTransitionTime":"2026-02-23T09:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.880699 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.880766 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.880783 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.880812 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.880831 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:16Z","lastTransitionTime":"2026-02-23T09:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.984779 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.984841 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.984856 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.984878 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:16 crc kubenswrapper[4834]: I0223 09:09:16.984897 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:16Z","lastTransitionTime":"2026-02-23T09:09:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.088911 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.088968 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.088979 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.088996 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.089007 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:17Z","lastTransitionTime":"2026-02-23T09:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.192230 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.192316 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.192333 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.192355 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.192372 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:17Z","lastTransitionTime":"2026-02-23T09:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.296057 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.296126 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.296149 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.296172 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.296189 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:17Z","lastTransitionTime":"2026-02-23T09:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.399651 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.399740 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.399758 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.399785 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.399802 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:17Z","lastTransitionTime":"2026-02-23T09:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.503785 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.503829 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.503838 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.503856 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.503869 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:17Z","lastTransitionTime":"2026-02-23T09:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.585263 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.585304 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.585304 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:17 crc kubenswrapper[4834]: E0223 09:09:17.585533 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:17 crc kubenswrapper[4834]: E0223 09:09:17.585820 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.585901 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 01:28:17.666865882 +0000 UTC Feb 23 09:09:17 crc kubenswrapper[4834]: E0223 09:09:17.586557 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:17 crc kubenswrapper[4834]: E0223 09:09:17.588276 4834 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:iptables-alerter,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2,Command:[/iptables-alerter/iptables-alerter.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONTAINER_RUNTIME_ENDPOINT,Value:unix:///run/crio/crio.sock,ValueFrom:nil,},EnvVar{Name:ALERTER_POD_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.name,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{68157440 0} {} 65Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:iptables-alerter-script,ReadOnly:false,MountPath:/iptables-alerter,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:host-slash,ReadOnly:true,MountPath:/host,SubPath:,MountPropagation:*HostToContainer,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rczfb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:*true,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod iptables-alerter-4ln5h_openshift-network-operator(d75a4c96-2883-4a0b-bab2-0fab2b6c0b49): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars" logger="UnhandledError" Feb 23 09:09:17 crc kubenswrapper[4834]: E0223 09:09:17.589475 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"iptables-alerter\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"" pod="openshift-network-operator/iptables-alerter-4ln5h" podUID="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" Feb 23 09:09:17 crc kubenswrapper[4834]: E0223 09:09:17.591656 4834 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 23 09:09:17 crc kubenswrapper[4834]: container &Container{Name:webhook,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Feb 23 09:09:17 crc kubenswrapper[4834]: if [[ -f "/env/_master" ]]; then Feb 23 09:09:17 crc kubenswrapper[4834]: set -o allexport Feb 23 09:09:17 crc kubenswrapper[4834]: source "/env/_master" Feb 23 09:09:17 crc kubenswrapper[4834]: set +o allexport Feb 23 09:09:17 crc kubenswrapper[4834]: fi Feb 23 09:09:17 crc kubenswrapper[4834]: # OVN-K will try to remove hybrid overlay node annotations even when the hybrid overlay is not enabled. 
Feb 23 09:09:17 crc kubenswrapper[4834]: # https://github.com/ovn-org/ovn-kubernetes/blob/ac6820df0b338a246f10f412cd5ec903bd234694/go-controller/pkg/ovn/master.go#L791 Feb 23 09:09:17 crc kubenswrapper[4834]: ho_enable="--enable-hybrid-overlay" Feb 23 09:09:17 crc kubenswrapper[4834]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start webhook" Feb 23 09:09:17 crc kubenswrapper[4834]: # extra-allowed-user: service account `ovn-kubernetes-control-plane` Feb 23 09:09:17 crc kubenswrapper[4834]: # sets pod annotations in multi-homing layer3 network controller (cluster-manager) Feb 23 09:09:17 crc kubenswrapper[4834]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Feb 23 09:09:17 crc kubenswrapper[4834]: --webhook-cert-dir="/etc/webhook-cert" \ Feb 23 09:09:17 crc kubenswrapper[4834]: --webhook-host=127.0.0.1 \ Feb 23 09:09:17 crc kubenswrapper[4834]: --webhook-port=9743 \ Feb 23 09:09:17 crc kubenswrapper[4834]: ${ho_enable} \ Feb 23 09:09:17 crc kubenswrapper[4834]: --enable-interconnect \ Feb 23 09:09:17 crc kubenswrapper[4834]: --disable-approver \ Feb 23 09:09:17 crc kubenswrapper[4834]: --extra-allowed-user="system:serviceaccount:openshift-ovn-kubernetes:ovn-kubernetes-control-plane" \ Feb 23 09:09:17 crc kubenswrapper[4834]: --wait-for-kubernetes-api=200s \ Feb 23 09:09:17 crc kubenswrapper[4834]: --pod-admission-conditions="/var/run/ovnkube-identity-config/additional-pod-admission-cond.json" \ Feb 23 09:09:17 crc kubenswrapper[4834]: --loglevel="${LOGLEVEL}" Feb 23 09:09:17 crc kubenswrapper[4834]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:2,ValueFrom:nil,},EnvVar{Name:KUBERNETES_NODE_NAME,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:spec.nodeName,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:webhook-cert,ReadOnly:false,MountPath:/etc/webhook-cert/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct 
envvars Feb 23 09:09:17 crc kubenswrapper[4834]: > logger="UnhandledError" Feb 23 09:09:17 crc kubenswrapper[4834]: E0223 09:09:17.596191 4834 kuberuntime_manager.go:1274] "Unhandled Error" err=< Feb 23 09:09:17 crc kubenswrapper[4834]: container &Container{Name:approver,Image:quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2,Command:[/bin/bash -c set -xe Feb 23 09:09:17 crc kubenswrapper[4834]: if [[ -f "/env/_master" ]]; then Feb 23 09:09:17 crc kubenswrapper[4834]: set -o allexport Feb 23 09:09:17 crc kubenswrapper[4834]: source "/env/_master" Feb 23 09:09:17 crc kubenswrapper[4834]: set +o allexport Feb 23 09:09:17 crc kubenswrapper[4834]: fi Feb 23 09:09:17 crc kubenswrapper[4834]: Feb 23 09:09:17 crc kubenswrapper[4834]: echo "I$(date "+%m%d %H:%M:%S.%N") - network-node-identity - start approver" Feb 23 09:09:17 crc kubenswrapper[4834]: exec /usr/bin/ovnkube-identity --k8s-apiserver=https://api-int.crc.testing:6443 \ Feb 23 09:09:17 crc kubenswrapper[4834]: --disable-webhook \ Feb 23 09:09:17 crc kubenswrapper[4834]: --csr-acceptance-conditions="/var/run/ovnkube-identity-config/additional-cert-acceptance-cond.json" \ Feb 23 09:09:17 crc kubenswrapper[4834]: --loglevel="${LOGLEVEL}" Feb 23 09:09:17 crc kubenswrapper[4834]: ],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LOGLEVEL,Value:4,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{52428800 0} {} 50Mi BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:env-overrides,ReadOnly:false,MountPath:/env,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovnkube-identity-cm,ReadOnly:false,MountPath:/var/run/ovnkube-identity-config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-s2kz5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000470000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod network-node-identity-vrzqb_openshift-network-node-identity(ef543e1b-8068-4ea3-b32a-61027b32e95d): CreateContainerConfigError: services have not yet been read at least once, cannot construct envvars Feb 23 09:09:17 crc kubenswrapper[4834]: > logger="UnhandledError" Feb 23 09:09:17 crc kubenswrapper[4834]: E0223 09:09:17.597602 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"webhook\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\", failed to \"StartContainer\" for \"approver\" with CreateContainerConfigError: \"services have not yet been read at least once, cannot construct envvars\"]" pod="openshift-network-node-identity/network-node-identity-vrzqb" 
podUID="ef543e1b-8068-4ea3-b32a-61027b32e95d" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.606569 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.606661 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.606687 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.606722 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.606744 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:17Z","lastTransitionTime":"2026-02-23T09:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.709300 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.709912 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.710149 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.710353 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.710598 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:17Z","lastTransitionTime":"2026-02-23T09:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.814210 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.814278 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.814295 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.814319 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.814335 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:17Z","lastTransitionTime":"2026-02-23T09:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.917499 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.917583 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.917607 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.917638 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:17 crc kubenswrapper[4834]: I0223 09:09:17.917664 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:17Z","lastTransitionTime":"2026-02-23T09:09:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.021009 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.021063 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.021075 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.021123 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.021140 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:18Z","lastTransitionTime":"2026-02-23T09:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.124002 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.124052 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.124061 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.124077 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.124089 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:18Z","lastTransitionTime":"2026-02-23T09:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.227231 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.227297 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.227310 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.227329 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.227341 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:18Z","lastTransitionTime":"2026-02-23T09:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.330339 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.330423 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.330433 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.330451 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.330464 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:18Z","lastTransitionTime":"2026-02-23T09:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.433036 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.433140 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.433165 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.433196 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.433220 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:18Z","lastTransitionTime":"2026-02-23T09:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.536783 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.536854 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.536872 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.536895 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.536908 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:18Z","lastTransitionTime":"2026-02-23T09:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.586078 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 21:56:19.85298924 +0000 UTC Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.640530 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.640576 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.640587 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.640605 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.640616 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:18Z","lastTransitionTime":"2026-02-23T09:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.701658 4834 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.744268 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.744326 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.744341 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.744360 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.744373 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:18Z","lastTransitionTime":"2026-02-23T09:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.847213 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.847267 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.847279 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.847299 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.847311 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:18Z","lastTransitionTime":"2026-02-23T09:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.951268 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.951324 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.951342 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.951363 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:18 crc kubenswrapper[4834]: I0223 09:09:18.951377 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:18Z","lastTransitionTime":"2026-02-23T09:09:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.054887 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.054964 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.054982 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.055010 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.055029 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:19Z","lastTransitionTime":"2026-02-23T09:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.159247 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.159309 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.159329 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.159351 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.159367 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:19Z","lastTransitionTime":"2026-02-23T09:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.263085 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.263155 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.263182 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.263210 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.263230 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:19Z","lastTransitionTime":"2026-02-23T09:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.367964 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.368052 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.368070 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.368098 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.368119 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:19Z","lastTransitionTime":"2026-02-23T09:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.423302 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.423566 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.423618 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:35.423567793 +0000 UTC m=+111.501882210 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.423697 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.423772 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.423818 4834 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.423823 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.423948 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-02-23 09:09:35.423910873 +0000 UTC m=+111.502225300 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.423956 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.424076 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.424106 4834 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.423995 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.424182 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.424205 4834 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.424224 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:35.42417293 +0000 UTC m=+111.502487347 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.424289 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:35.424263092 +0000 UTC m=+111.502577679 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.424004 4834 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.424348 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:35.424335684 +0000 UTC m=+111.502650311 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.471276 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.471342 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.471356 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.471377 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.471391 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:19Z","lastTransitionTime":"2026-02-23T09:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.574477 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.574600 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.574627 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.574664 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.574691 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:19Z","lastTransitionTime":"2026-02-23T09:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.585140 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.585235 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.585140 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.585332 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.585470 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:19 crc kubenswrapper[4834]: E0223 09:09:19.585537 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.587232 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 12:40:20.218476162 +0000 UTC Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.677272 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.677339 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.677351 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.677372 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.677389 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:19Z","lastTransitionTime":"2026-02-23T09:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.780324 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.780390 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.780419 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.780441 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.780453 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:19Z","lastTransitionTime":"2026-02-23T09:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.883630 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.883695 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.883708 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.883730 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.883746 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:19Z","lastTransitionTime":"2026-02-23T09:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.986551 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.986610 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.986621 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.986639 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:19 crc kubenswrapper[4834]: I0223 09:09:19.986652 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:19Z","lastTransitionTime":"2026-02-23T09:09:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.089627 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.089687 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.089698 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.089717 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.089728 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:20Z","lastTransitionTime":"2026-02-23T09:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.192627 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.192692 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.192705 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.192728 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.192744 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:20Z","lastTransitionTime":"2026-02-23T09:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.296390 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.296514 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.296533 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.296558 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.296578 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:20Z","lastTransitionTime":"2026-02-23T09:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.400069 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.400146 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.400159 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.400180 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.400196 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:20Z","lastTransitionTime":"2026-02-23T09:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.503184 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.503254 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.503275 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.503305 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.503326 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:20Z","lastTransitionTime":"2026-02-23T09:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.587341 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 10:18:12.634172645 +0000 UTC Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.606619 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.606735 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.606757 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.606823 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.606846 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:20Z","lastTransitionTime":"2026-02-23T09:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.710175 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.710243 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.710267 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.710299 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.710321 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:20Z","lastTransitionTime":"2026-02-23T09:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.813676 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.813738 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.813755 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.813778 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.813794 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:20Z","lastTransitionTime":"2026-02-23T09:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.917475 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.917534 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.917547 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.917567 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:20 crc kubenswrapper[4834]: I0223 09:09:20.917584 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:20Z","lastTransitionTime":"2026-02-23T09:09:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.002501 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"99ab242a4e943c610bcfa7f3388d31dbb45813d765df811d4e5a1d1b0a2d1adc"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.022538 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.022588 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.022598 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.022621 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.022632 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.025970 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.026008 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.026017 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.026033 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.026044 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.030830 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c5360b0-bcfe-49c7-99dd-87f4e16a1936\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad16faa9445f6f43d8e3f0fda566b1fde83ccbf5a60419cb21f48495cc29b9f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02ab00dc78b9f4a01120f4d13159010cd9e224980671aac02cf91612673f557a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f610cfd6d780a3e8882ec6cf11406234d20578fbf77ce9c9bd307e7da80c4691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748315cd1ff895c9c49ed28ebc47f96eab7f0bfa090ab092dddfcfcffbc4d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://616a1ea1355329722a38418fc7d3b3f6be94b6d0a505aba61d85c755873e5d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:21 crc kubenswrapper[4834]: E0223 09:09:21.039008 4834 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a013d5d2-c870-42c6-8fef-9285815a5771\\\",\\\"systemUUID\\\":\\\"3bc3bfda-fc9c-4fdf-b927-7c29d17cae8a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.043881 4834 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.043934 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.043952 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.043973 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.043984 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.044655 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:04Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99ab242a4e943c610bcfa7f3388d31dbb45813d765df811d4e5a1d1b0a2d1adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.055035 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:21 crc kubenswrapper[4834]: E0223 09:09:21.055580 4834 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a013d5d2-c870-42c6-8fef-9285815a5771\\\",\\\"systemUUID\\\":\\\"3bc3bfda-fc9c-4fdf-b927-7c29d17cae8a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"cru
n\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.059643 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.059708 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.059723 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.059759 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.059774 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.066957 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:21 crc kubenswrapper[4834]: E0223 09:09:21.070900 4834 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has no disk 
pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeByt
es\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-a
rt-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a013d5d2-c870-42c6-8fef-9285815a5771\\\",\\\"systemUUID\\\":\\\"3
bc3bfda-fc9c-4fdf-b927-7c29d17cae8a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.075063 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.075099 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.075109 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.075127 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.075140 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.078961 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:21 crc kubenswrapper[4834]: E0223 09:09:21.086353 4834 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a013d5d2-c870-42c6-8fef-9285815a5771\\\",\\\"systemUUID\\\":\\\"3bc3bfda-fc9c-4fdf-b927-7c29d17cae8a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.091292 4834 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.091341 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.091352 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.091373 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.091389 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.091523 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f86d8e6-b3d5-402a-9f9b-568ac673d63c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe1ece38b380cdd99d8c323c271e3753fdacdbbfb65f9ea0f7d46a4b99443ea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://828b449e3bfa815231a28f30a1f8d4360fc21abd57f0f5fbb2297c41a7116189\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06c52566aed1d53aa400ebba7eb5642d90291f5e0a712fe950ba434a8bd2c342\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-23T09:08:54Z\\\",\\\"message\\\":\\\"le observer\\\\nW0223 09:08:54.279935 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0223 09:08:54.280187 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0223 09:08:54.281744 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1246126551/tls.crt::/tmp/serving-cert-1246126551/tls.key\\\\\\\"\\\\nI0223 09:08:54.648156 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0223 09:08:54.651138 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0223 09:08:54.651159 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0223 09:08:54.651179 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0223 09:08:54.651185 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0223 09:08:54.656437 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0223 09:08:54.656475 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0223 09:08:54.656536 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656560 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656581 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0223 09:08:54.656602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0223 09:08:54.656623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0223 09:08:54.656642 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0223 09:08:54.657691 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-23T09:08:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c99b854342f23f595ebbf00de97b11b8a4bbd92e1bbcfcb876dd8d8139f84b15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.105532 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:21 crc kubenswrapper[4834]: E0223 09:09:21.106335 4834 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"a013d5d2-c870-42c6-8fef-9285815a5771\\\",\\\"systemUUID\\\":\\\"3bc3bfda-fc9c-4fdf-b927-7c29d17cae8a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:21 crc kubenswrapper[4834]: E0223 09:09:21.106494 4834 
kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.119245 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.125703 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.125759 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.125780 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.125801 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.125814 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.132561 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a44d4f04-0f7d-4c44-9dc0-1ecd80a10388\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f16a693a9ddada9fa2b8ef889011d4aa81ead7c66bc2245ecf1243371bd5e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:21 crc 
kubenswrapper[4834]: I0223 09:09:21.229383 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.229455 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.229464 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.229484 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.229495 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.331801 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.331863 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.331875 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.331895 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.331909 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.435012 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.435065 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.435077 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.435100 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.435114 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.538617 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.538671 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.538682 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.538698 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.538708 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.584842 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.584933 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:21 crc kubenswrapper[4834]: E0223 09:09:21.585052 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.584967 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:21 crc kubenswrapper[4834]: E0223 09:09:21.585137 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:21 crc kubenswrapper[4834]: E0223 09:09:21.585262 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.588161 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-30 22:40:56.032245779 +0000 UTC Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.641927 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.641988 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.642001 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.642022 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.642035 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.745515 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.745557 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.745567 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.745582 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.745593 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.848536 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.848584 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.848597 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.848616 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.848626 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.951568 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.951613 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.951622 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.951640 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:21 crc kubenswrapper[4834]: I0223 09:09:21.951654 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:21Z","lastTransitionTime":"2026-02-23T09:09:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.054486 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.054524 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.054534 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.054551 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.054561 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:22Z","lastTransitionTime":"2026-02-23T09:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.157422 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.157493 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.157508 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.157528 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.157540 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:22Z","lastTransitionTime":"2026-02-23T09:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.260245 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.260307 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.260325 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.260350 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.260371 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:22Z","lastTransitionTime":"2026-02-23T09:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.363745 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.363793 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.363804 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.363824 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.363839 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:22Z","lastTransitionTime":"2026-02-23T09:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.467368 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.467446 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.467460 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.467480 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.467493 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:22Z","lastTransitionTime":"2026-02-23T09:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.570122 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.570169 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.570183 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.570201 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.570216 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:22Z","lastTransitionTime":"2026-02-23T09:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.585158 4834 scope.go:117] "RemoveContainer" containerID="081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d" Feb 23 09:09:22 crc kubenswrapper[4834]: E0223 09:09:22.585344 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 40s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.588928 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 18:28:01.305280938 +0000 UTC Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.672885 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.672926 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.672941 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.672961 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.672976 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:22Z","lastTransitionTime":"2026-02-23T09:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.776375 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.776483 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.776507 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.776538 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.776560 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:22Z","lastTransitionTime":"2026-02-23T09:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.879996 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.880040 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.880057 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.880080 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.880097 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:22Z","lastTransitionTime":"2026-02-23T09:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.983445 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.983500 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.983510 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.983536 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:22 crc kubenswrapper[4834]: I0223 09:09:22.983550 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:22Z","lastTransitionTime":"2026-02-23T09:09:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.086317 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.086373 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.086388 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.086451 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.086473 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:23Z","lastTransitionTime":"2026-02-23T09:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.189208 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.189262 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.189277 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.189298 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.189312 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:23Z","lastTransitionTime":"2026-02-23T09:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.292621 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.292678 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.292689 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.292709 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.292723 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:23Z","lastTransitionTime":"2026-02-23T09:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.396656 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.396714 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.396735 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.396753 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.396768 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:23Z","lastTransitionTime":"2026-02-23T09:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.500040 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.500086 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.500108 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.500130 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.500142 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:23Z","lastTransitionTime":"2026-02-23T09:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.585231 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.585265 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:23 crc kubenswrapper[4834]: E0223 09:09:23.585545 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.585567 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:23 crc kubenswrapper[4834]: E0223 09:09:23.585703 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:23 crc kubenswrapper[4834]: E0223 09:09:23.585894 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.589355 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 23:21:52.775224676 +0000 UTC Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.604209 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.604270 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.604291 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.604316 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.604336 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:23Z","lastTransitionTime":"2026-02-23T09:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.706931 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.706986 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.707002 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.707025 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.707043 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:23Z","lastTransitionTime":"2026-02-23T09:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.810838 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.810944 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.810967 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.810991 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.811018 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:23Z","lastTransitionTime":"2026-02-23T09:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.914565 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.914619 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.914642 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.914663 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:23 crc kubenswrapper[4834]: I0223 09:09:23.914678 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:23Z","lastTransitionTime":"2026-02-23T09:09:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.018709 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.018807 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.018825 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.018855 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.018873 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:24Z","lastTransitionTime":"2026-02-23T09:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.122288 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.122356 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.122372 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.122393 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.122442 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:24Z","lastTransitionTime":"2026-02-23T09:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.225851 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.225902 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.225913 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.225931 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.225950 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:24Z","lastTransitionTime":"2026-02-23T09:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.333779 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.333836 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.333889 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.333922 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.333933 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:24Z","lastTransitionTime":"2026-02-23T09:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.437574 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.437635 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.437648 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.437671 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.437686 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:24Z","lastTransitionTime":"2026-02-23T09:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.541306 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.541376 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.541389 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.541424 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.541437 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:24Z","lastTransitionTime":"2026-02-23T09:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.592242 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-23 23:48:43.05935387 +0000 UTC Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.602504 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.617229 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.631314 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.644182 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.644254 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.644264 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.644281 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.644315 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:24Z","lastTransitionTime":"2026-02-23T09:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.649923 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f86d8e6-b3d5-402a-9f9b-568ac673d63c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe1ece38b380cdd99d8c323c271e3753fdacdbbfb65f9ea0f7d46a4b99443ea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://828b449e3bfa815231a28f30a1f8d4360fc21abd57f0f5fbb2297c41a7116189\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06c52566aed1d53aa400ebba7eb5642d90291f5e0a712fe950ba434a8bd2c342\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-23T09:08:54Z\\\",\\\"message\\\":\\\"le observer\\\\nW0223 09:08:54.279935 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0223 09:08:54.280187 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0223 09:08:54.281744 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1246126551/tls.crt::/tmp/serving-cert-1246126551/tls.key\\\\\\\"\\\\nI0223 09:08:54.648156 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0223 09:08:54.651138 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0223 09:08:54.651159 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0223 09:08:54.651179 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0223 09:08:54.651185 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0223 09:08:54.656437 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0223 09:08:54.656475 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0223 09:08:54.656536 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656560 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656581 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0223 09:08:54.656602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0223 09:08:54.656623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0223 09:08:54.656642 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0223 09:08:54.657691 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-23T09:08:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c99b854342f23f595ebbf00de97b11b8a4bbd92e1bbcfcb876dd8d8139f84b15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.671743 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c5360b0-bcfe-49c7-99dd-87f4e16a1936\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad16faa9445f6f43d8e3f0fda566b1fde83ccbf5a60419cb21f48495cc29b9f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02ab00dc78b9f4a01120f4d13159010cd9e224980671aac02cf91612673f557a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f610cfd6d780a3e8882ec6cf11406234d20578fbf77ce9c9bd307e7da80c4691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748315cd1ff895c9c49ed28ebc47f96eab7f0bf
a090ab092dddfcfcffbc4d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://616a1ea1355329722a38418fc7d3b3f6be94b6d0a505aba61d85c755873e5d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.685256 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:04Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99ab242a4e943c610bcfa7f3388d31dbb45813d765df811d4e5a1d1b0a2d1adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: 
Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.699769 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a44d4f04-0f7d-4c44-9dc0-1ecd80a10388\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f16a693a9ddada9fa2b8ef889011d4aa81ead7c66bc2245ecf1243371bd5e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.715872 4834 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.728103 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.747353 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.747477 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.747498 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.747532 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.747551 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:24Z","lastTransitionTime":"2026-02-23T09:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.850693 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.850746 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.850758 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.850774 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.850784 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:24Z","lastTransitionTime":"2026-02-23T09:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.953731 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.953866 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.953885 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.953908 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:24 crc kubenswrapper[4834]: I0223 09:09:24.953922 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:24Z","lastTransitionTime":"2026-02-23T09:09:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.056713 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.056749 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.056758 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.056782 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.056793 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:25Z","lastTransitionTime":"2026-02-23T09:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.159387 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.159467 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.159478 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.159518 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.159534 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:25Z","lastTransitionTime":"2026-02-23T09:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.261905 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.261938 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.261947 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.261963 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.261979 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:25Z","lastTransitionTime":"2026-02-23T09:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.365032 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.365092 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.365101 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.365120 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.365130 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:25Z","lastTransitionTime":"2026-02-23T09:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.468551 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.468641 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.468660 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.468708 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.468725 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:25Z","lastTransitionTime":"2026-02-23T09:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.571499 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.571541 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.571554 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.571574 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.571586 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:25Z","lastTransitionTime":"2026-02-23T09:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.584244 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.584244 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:25 crc kubenswrapper[4834]: E0223 09:09:25.584386 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.584263 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:25 crc kubenswrapper[4834]: E0223 09:09:25.584638 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:25 crc kubenswrapper[4834]: E0223 09:09:25.584695 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.592791 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 18:36:33.280591018 +0000 UTC Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.673992 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.674032 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.674041 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.674055 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.674064 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:25Z","lastTransitionTime":"2026-02-23T09:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.777385 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.777482 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.777501 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.777530 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.777552 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:25Z","lastTransitionTime":"2026-02-23T09:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.880872 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.880915 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.880924 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.880941 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.880952 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:25Z","lastTransitionTime":"2026-02-23T09:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.984052 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.984178 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.984192 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.984214 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:25 crc kubenswrapper[4834]: I0223 09:09:25.984251 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:25Z","lastTransitionTime":"2026-02-23T09:09:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.086993 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.087043 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.087058 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.087075 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.087086 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:26Z","lastTransitionTime":"2026-02-23T09:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.190061 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.190129 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.190146 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.190168 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.190182 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:26Z","lastTransitionTime":"2026-02-23T09:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.293138 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.293197 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.293209 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.293232 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.293252 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:26Z","lastTransitionTime":"2026-02-23T09:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.396584 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.396637 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.396652 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.396679 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.396696 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:26Z","lastTransitionTime":"2026-02-23T09:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.498676 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.498718 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.498730 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.498745 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.498754 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:26Z","lastTransitionTime":"2026-02-23T09:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.593230 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 21:23:43.519144175 +0000 UTC Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.601318 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.601375 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.601390 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.601427 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.601439 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:26Z","lastTransitionTime":"2026-02-23T09:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.704662 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.704716 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.704729 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.704755 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.704767 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:26Z","lastTransitionTime":"2026-02-23T09:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.807747 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.807800 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.807809 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.807827 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.807837 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:26Z","lastTransitionTime":"2026-02-23T09:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.911141 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.911215 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.911227 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.911248 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:26 crc kubenswrapper[4834]: I0223 09:09:26.911265 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:26Z","lastTransitionTime":"2026-02-23T09:09:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.014503 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.014552 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.014565 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.014585 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.014599 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:27Z","lastTransitionTime":"2026-02-23T09:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.112892 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-6ml2v"] Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.113228 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-6ml2v" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.114947 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.116535 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.116782 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.116855 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.116872 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.116900 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.116920 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:27Z","lastTransitionTime":"2026-02-23T09:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.117965 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.124811 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a44d4f04-0f7d-4c44-9dc0-1ecd80a10388\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f16a693a9ddada9fa2b8ef889011d4aa81ead7c66bc2245ecf1243371bd5e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 
23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.137886 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.149765 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.171147 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c5360b0-bcfe-49c7-99dd-87f4e16a1936\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad16faa9445f6f43d8e3f0fda566b1fde83ccbf5a60419cb21f48495cc29b9f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02ab00dc78b9f4a01120f4d13159
010cd9e224980671aac02cf91612673f557a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f610cfd6d780a3e8882ec6cf11406234d20578fbf77ce9c9bd307e7da80c4691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748315cd1ff895c9c49ed28ebc47f96eab7f0bfa090ab092dddfcfcffbc4d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://616a1ea1355329722a38418fc7d3b3f6be94b6d0a505aba61d85c755873e5d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.187561 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:04Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99ab242a4e943c610bcfa7f3388d31dbb45813d765df811d4e5a1d1b0a2d1adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.197904 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.210721 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.220342 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.220393 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.220424 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.220443 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.220454 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:27Z","lastTransitionTime":"2026-02-23T09:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.222271 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.231797 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6ml2v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4906a4ab-383b-45d4-a1e4-5a849346b4e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rssfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6ml2v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.243086 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f86d8e6-b3d5-402a-9f9b-568ac673d63c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe1ece38b380cdd99d8c323c271e3753fdacdbbfb65f9ea0f7d46a4b99443ea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://828b449e3bfa815231a28f30a1f8d4360fc21abd57f0f5fbb2297c41a7116189\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06c52566aed1d53aa400ebba7eb5642d90291f5e0a712fe950ba434a8bd2c342\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-23T09:08:54Z\\\",\\\"message\\\":\\\"le observer\\\\nW0223 09:08:54.279935 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0223 09:08:54.280187 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0223 09:08:54.281744 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1246126551/tls.crt::/tmp/serving-cert-1246126551/tls.key\\\\\\\"\\\\nI0223 09:08:54.648156 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0223 09:08:54.651138 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0223 09:08:54.651159 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0223 09:08:54.651179 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0223 09:08:54.651185 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0223 09:08:54.656437 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0223 09:08:54.656475 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0223 09:08:54.656536 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656560 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656581 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0223 09:08:54.656602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0223 09:08:54.656623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0223 09:08:54.656642 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0223 09:08:54.657691 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-23T09:08:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c99b854342f23f595ebbf00de97b11b8a4bbd92e1bbcfcb876dd8d8139f84b15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.298769 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/4906a4ab-383b-45d4-a1e4-5a849346b4e6-hosts-file\") pod \"node-resolver-6ml2v\" (UID: \"4906a4ab-383b-45d4-a1e4-5a849346b4e6\") " pod="openshift-dns/node-resolver-6ml2v" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.298885 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rssfs\" (UniqueName: \"kubernetes.io/projected/4906a4ab-383b-45d4-a1e4-5a849346b4e6-kube-api-access-rssfs\") pod \"node-resolver-6ml2v\" (UID: \"4906a4ab-383b-45d4-a1e4-5a849346b4e6\") " pod="openshift-dns/node-resolver-6ml2v" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.322814 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.322883 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.322896 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.322915 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.322927 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:27Z","lastTransitionTime":"2026-02-23T09:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.400059 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rssfs\" (UniqueName: \"kubernetes.io/projected/4906a4ab-383b-45d4-a1e4-5a849346b4e6-kube-api-access-rssfs\") pod \"node-resolver-6ml2v\" (UID: \"4906a4ab-383b-45d4-a1e4-5a849346b4e6\") " pod="openshift-dns/node-resolver-6ml2v" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.400124 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/4906a4ab-383b-45d4-a1e4-5a849346b4e6-hosts-file\") pod \"node-resolver-6ml2v\" (UID: \"4906a4ab-383b-45d4-a1e4-5a849346b4e6\") " pod="openshift-dns/node-resolver-6ml2v" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.400221 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/4906a4ab-383b-45d4-a1e4-5a849346b4e6-hosts-file\") pod \"node-resolver-6ml2v\" (UID: \"4906a4ab-383b-45d4-a1e4-5a849346b4e6\") " pod="openshift-dns/node-resolver-6ml2v" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.422694 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rssfs\" (UniqueName: \"kubernetes.io/projected/4906a4ab-383b-45d4-a1e4-5a849346b4e6-kube-api-access-rssfs\") pod \"node-resolver-6ml2v\" (UID: \"4906a4ab-383b-45d4-a1e4-5a849346b4e6\") " pod="openshift-dns/node-resolver-6ml2v" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.425448 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.425500 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.425511 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.425529 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.425543 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:27Z","lastTransitionTime":"2026-02-23T09:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.430941 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-6ml2v" Feb 23 09:09:27 crc kubenswrapper[4834]: W0223 09:09:27.452381 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4906a4ab_383b_45d4_a1e4_5a849346b4e6.slice/crio-3609ac4ef6f44fda91d4470d67b8c480d555b6cce29ebe49806497b80cb1da04 WatchSource:0}: Error finding container 3609ac4ef6f44fda91d4470d67b8c480d555b6cce29ebe49806497b80cb1da04: Status 404 returned error can't find the container with id 3609ac4ef6f44fda91d4470d67b8c480d555b6cce29ebe49806497b80cb1da04 Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.464616 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-n556f"] Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.464981 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.468191 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.468270 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.468475 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.469125 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.469697 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-gtpxs"] Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.469906 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.470373 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-kt9lp"] Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.470536 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.472832 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.475007 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.477635 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.482662 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.483145 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.483265 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.483256 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.483958 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.484634 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:04Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99ab242a4e943c610bcfa7f3388d31dbb45813d765df811d4e5a1d1b0a2d1adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: 
Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.500989 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.514789 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.527704 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.528625 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.528674 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.528687 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.528708 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.528721 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:27Z","lastTransitionTime":"2026-02-23T09:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.538424 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6ml2v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4906a4ab-383b-45d4-a1e4-5a849346b4e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rssfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6ml2v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.552054 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f86d8e6-b3d5-402a-9f9b-568ac673d63c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe1ece38b380cdd99d8c323c271e3753fdacdbbfb65f9ea0f7d46a4b99443ea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://828b449e3bfa815231a28f30a1f8d4360fc21abd57f0f5fbb2297c41a7116189\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06c52566aed1d53aa400ebba7eb5642d90291f5e0a712fe950ba434a8bd2c342\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-23T09:08:54Z\\\",\\\"message\\\":\\\"le observer\\\\nW0223 09:08:54.279935 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0223 09:08:54.280187 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0223 09:08:54.281744 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1246126551/tls.crt::/tmp/serving-cert-1246126551/tls.key\\\\\\\"\\\\nI0223 09:08:54.648156 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0223 09:08:54.651138 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0223 09:08:54.651159 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0223 09:08:54.651179 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0223 09:08:54.651185 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0223 09:08:54.656437 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0223 09:08:54.656475 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0223 09:08:54.656536 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656560 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656581 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0223 09:08:54.656602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0223 09:08:54.656623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0223 09:08:54.656642 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0223 09:08:54.657691 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-23T09:08:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c99b854342f23f595ebbf00de97b11b8a4bbd92e1bbcfcb876dd8d8139f84b15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.571143 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c5360b0-bcfe-49c7-99dd-87f4e16a1936\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad16faa9445f6f43d8e3f0fda566b1fde83ccbf5a60419cb21f48495cc29b9f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02ab00dc78b9f4a01120f4d13159010cd9e224980671aac02cf91612673f557a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f610cfd6d780a3e8882ec6cf11406234d20578fbf77ce9c9bd307e7da80c4691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748315cd1ff895c9c49ed28ebc47f96eab7f0bf
a090ab092dddfcfcffbc4d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://616a1ea1355329722a38418fc7d3b3f6be94b6d0a505aba61d85c755873e5d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.584843 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.584889 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.584840 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.584864 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:27 crc kubenswrapper[4834]: E0223 09:09:27.585042 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:27 crc kubenswrapper[4834]: E0223 09:09:27.585173 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:27 crc kubenswrapper[4834]: E0223 09:09:27.585292 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.593446 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 16:55:06.682860359 +0000 UTC Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.598069 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n556f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\
\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqdgd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n556f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.602728 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-run-k8s-cni-cncf-io\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.602814 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-multus-socket-dir-parent\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.602836 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5a0627a9-533e-4723-b891-80dc7a5b611e-cnibin\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.602883 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5a0627a9-533e-4723-b891-80dc7a5b611e-cni-binary-copy\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.602904 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-os-release\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.602920 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-cni-binary-copy\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.602934 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-multus-cni-dir\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.602948 4834 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/5a0627a9-533e-4723-b891-80dc7a5b611e-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.602974 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77qrb\" (UniqueName: \"kubernetes.io/projected/1172b9a5-71ca-49e9-a033-3b59c9c024a4-kube-api-access-77qrb\") pod \"machine-config-daemon-kt9lp\" (UID: \"1172b9a5-71ca-49e9-a033-3b59c9c024a4\") " pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.602991 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-run-multus-certs\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603005 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-etc-kubernetes\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603023 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-cnibin\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603038 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1172b9a5-71ca-49e9-a033-3b59c9c024a4-proxy-tls\") pod \"machine-config-daemon-kt9lp\" (UID: \"1172b9a5-71ca-49e9-a033-3b59c9c024a4\") " pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603052 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-run-netns\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603071 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-var-lib-cni-bin\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603085 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5a0627a9-533e-4723-b891-80dc7a5b611e-os-release\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " 
pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603101 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5a0627a9-533e-4723-b891-80dc7a5b611e-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603123 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-system-cni-dir\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603140 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5a0627a9-533e-4723-b891-80dc7a5b611e-system-cni-dir\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603155 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1172b9a5-71ca-49e9-a033-3b59c9c024a4-rootfs\") pod \"machine-config-daemon-kt9lp\" (UID: \"1172b9a5-71ca-49e9-a033-3b59c9c024a4\") " pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603175 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqdgd\" (UniqueName: \"kubernetes.io/projected/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-kube-api-access-pqdgd\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603205 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-multus-conf-dir\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603224 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-var-lib-cni-multus\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603240 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-var-lib-kubelet\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603257 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: 
\"kubernetes.io/configmap/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-multus-daemon-config\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603278 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqkkr\" (UniqueName: \"kubernetes.io/projected/5a0627a9-533e-4723-b891-80dc7a5b611e-kube-api-access-bqkkr\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603310 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-hostroot\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.603326 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1172b9a5-71ca-49e9-a033-3b59c9c024a4-mcd-auth-proxy-config\") pod \"machine-config-daemon-kt9lp\" (UID: \"1172b9a5-71ca-49e9-a033-3b59c9c024a4\") " pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.610376 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a44d4f04-0f7d-4c44-9dc0-1ecd80a10388\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f16a693a9ddada9fa2b8ef889011d4aa81ead7c66bc2245ecf1243371bd5e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",
\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.622430 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.632864 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.633019 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.633095 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.633171 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.633240 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:27Z","lastTransitionTime":"2026-02-23T09:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.634984 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n556f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqdgd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\
\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n556f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.652773 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c5360b0-bcfe-49c7-99dd-87f4e16a1936\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad16faa9445f6f43d8e3f0fda566b1fde83ccbf5a60419cb21f48495cc29b9f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02ab00dc78b9f4a01120f4d13159010cd9e224980671aac02cf91612673f557a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f610cfd6d780a3e8882ec6cf11406234d20578fbf77ce9c9bd307e7da80c4691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-de
v@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748315cd1ff895c9c49ed28ebc47f96eab7f0bfa090ab092dddfcfcffbc4d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://616a1ea1355329722a38418fc7d3b3f6be94b6d0a505aba61d85c755873e5d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117
b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.664270 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a44d4f04-0f7d-4c44-9dc0-1ecd80a10388\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f16a693a9ddada9fa2b8ef889011d4aa81ead7c66bc2245ecf1243371bd5e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.675852 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.684998 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.700979 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a0627a9-533e-4723-b891-80dc7a5b611e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gtpxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.704778 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-os-release\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.704948 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-cni-binary-copy\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.704968 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-multus-cni-dir\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.704898 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-os-release\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.705040 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/5a0627a9-533e-4723-b891-80dc7a5b611e-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.705063 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77qrb\" (UniqueName: \"kubernetes.io/projected/1172b9a5-71ca-49e9-a033-3b59c9c024a4-kube-api-access-77qrb\") pod \"machine-config-daemon-kt9lp\" (UID: \"1172b9a5-71ca-49e9-a033-3b59c9c024a4\") " pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.705257 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-multus-cni-dir\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.705979 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/5a0627a9-533e-4723-b891-80dc7a5b611e-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.706047 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-run-multus-certs\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.706067 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-etc-kubernetes\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.706121 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-etc-kubernetes\") pod \"multus-n556f\" (UID: 
\"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.706107 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-run-multus-certs\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.706155 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-cnibin\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.706180 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1172b9a5-71ca-49e9-a033-3b59c9c024a4-proxy-tls\") pod \"machine-config-daemon-kt9lp\" (UID: \"1172b9a5-71ca-49e9-a033-3b59c9c024a4\") " pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.706235 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-cnibin\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.706061 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-cni-binary-copy\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.706283 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-system-cni-dir\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.706306 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-run-netns\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.706661 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-system-cni-dir\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.706688 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-run-netns\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.707078 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-var-lib-cni-bin\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.707146 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5a0627a9-533e-4723-b891-80dc7a5b611e-os-release\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.707110 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-var-lib-cni-bin\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.707260 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5a0627a9-533e-4723-b891-80dc7a5b611e-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.707488 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5a0627a9-533e-4723-b891-80dc7a5b611e-system-cni-dir\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.707600 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1172b9a5-71ca-49e9-a033-3b59c9c024a4-rootfs\") pod \"machine-config-daemon-kt9lp\" (UID: \"1172b9a5-71ca-49e9-a033-3b59c9c024a4\") " pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.707688 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/1172b9a5-71ca-49e9-a033-3b59c9c024a4-rootfs\") pod \"machine-config-daemon-kt9lp\" (UID: \"1172b9a5-71ca-49e9-a033-3b59c9c024a4\") " pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.707317 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/5a0627a9-533e-4723-b891-80dc7a5b611e-os-release\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.707565 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/5a0627a9-533e-4723-b891-80dc7a5b611e-system-cni-dir\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.707844 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-multus-conf-dir\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.707910 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-multus-conf-dir\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708010 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqdgd\" (UniqueName: \"kubernetes.io/projected/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-kube-api-access-pqdgd\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708132 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-var-lib-cni-multus\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708237 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-var-lib-kubelet\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708335 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-multus-daemon-config\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708459 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-var-lib-cni-multus\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708471 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-var-lib-kubelet\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708470 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqkkr\" (UniqueName: \"kubernetes.io/projected/5a0627a9-533e-4723-b891-80dc7a5b611e-kube-api-access-bqkkr\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708560 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-hostroot\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " 
pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708594 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1172b9a5-71ca-49e9-a033-3b59c9c024a4-mcd-auth-proxy-config\") pod \"machine-config-daemon-kt9lp\" (UID: \"1172b9a5-71ca-49e9-a033-3b59c9c024a4\") " pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708652 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-multus-socket-dir-parent\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708686 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-run-k8s-cni-cncf-io\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.707935 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/5a0627a9-533e-4723-b891-80dc7a5b611e-tuning-conf-dir\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708777 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5a0627a9-533e-4723-b891-80dc7a5b611e-cnibin\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708740 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/5a0627a9-533e-4723-b891-80dc7a5b611e-cnibin\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708917 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-host-run-k8s-cni-cncf-io\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708963 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5a0627a9-533e-4723-b891-80dc7a5b611e-cni-binary-copy\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.708929 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-multus-socket-dir-parent\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " 
pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.709106 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-hostroot\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.709283 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-multus-daemon-config\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.710658 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/5a0627a9-533e-4723-b891-80dc7a5b611e-cni-binary-copy\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.711066 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/1172b9a5-71ca-49e9-a033-3b59c9c024a4-mcd-auth-proxy-config\") pod \"machine-config-daemon-kt9lp\" (UID: \"1172b9a5-71ca-49e9-a033-3b59c9c024a4\") " pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.711959 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/1172b9a5-71ca-49e9-a033-3b59c9c024a4-proxy-tls\") pod \"machine-config-daemon-kt9lp\" (UID: \"1172b9a5-71ca-49e9-a033-3b59c9c024a4\") " pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.717464 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:04Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99ab242a4e943c610bcfa7f3388d31dbb45813d765df811d4e5a1d1b0a2d1adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.724989 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77qrb\" (UniqueName: \"kubernetes.io/projected/1172b9a5-71ca-49e9-a033-3b59c9c024a4-kube-api-access-77qrb\") pod \"machine-config-daemon-kt9lp\" (UID: \"1172b9a5-71ca-49e9-a033-3b59c9c024a4\") " pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.728042 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqdgd\" (UniqueName: \"kubernetes.io/projected/56fcafd6-2c67-4f14-a43e-8a6cd12f012e-kube-api-access-pqdgd\") pod \"multus-n556f\" (UID: \"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\") " pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.731580 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready 
status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.732436 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqkkr\" (UniqueName: \"kubernetes.io/projected/5a0627a9-533e-4723-b891-80dc7a5b611e-kube-api-access-bqkkr\") pod \"multus-additional-cni-plugins-gtpxs\" (UID: \"5a0627a9-533e-4723-b891-80dc7a5b611e\") " pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.736695 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.736771 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.736785 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.736807 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.736820 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:27Z","lastTransitionTime":"2026-02-23T09:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.747639 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.760682 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.770423 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6ml2v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4906a4ab-383b-45d4-a1e4-5a849346b4e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rssfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6ml2v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: 
Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.781697 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1172b9a5-71ca-49e9-a033-3b59c9c024a4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-77qrb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-77qrb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt9lp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.793339 4834 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f86d8e6-b3d5-402a-9f9b-568ac673d63c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe1ece38b380cdd99d8c323c271e3753fdacdbbfb65f9ea0f7d46a4b99443ea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://828b449e3bfa815231a28f30a1f8d4360fc21abd57f0f5fbb2297c41a7116189\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06c52566aed1d53aa400ebba7eb5642d90291f5e0a712fe950ba434a8bd2c342\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-23T09:08:54Z\\\",\\\"message\\\":\\\"le observer\\\\nW0223 09:08:54.279935 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0223 09:08:54.280187 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0223 09:08:54.281744 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1246126551/tls.crt::/tmp/serving-cert-1246126551/tls.key\\\\\\\"\\\\nI0223 09:08:54.648156 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0223 09:08:54.651138 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0223 09:08:54.651159 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0223 09:08:54.651179 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0223 09:08:54.651185 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0223 09:08:54.656437 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0223 09:08:54.656475 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0223 09:08:54.656536 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656560 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656581 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0223 09:08:54.656602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0223 09:08:54.656623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0223 09:08:54.656642 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0223 09:08:54.657691 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-23T09:08:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c99b854342f23f595ebbf00de97b11b8a4bbd92e1bbcfcb876dd8d8139f84b15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.797670 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-n556f" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.809158 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:09:27 crc kubenswrapper[4834]: W0223 09:09:27.811064 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod56fcafd6_2c67_4f14_a43e_8a6cd12f012e.slice/crio-c66c1dac0f352d629c055018adfacd6d1f351afad35f17856e4ac0658b5bf0d1 WatchSource:0}: Error finding container c66c1dac0f352d629c055018adfacd6d1f351afad35f17856e4ac0658b5bf0d1: Status 404 returned error can't find the container with id c66c1dac0f352d629c055018adfacd6d1f351afad35f17856e4ac0658b5bf0d1 Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.816527 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" Feb 23 09:09:27 crc kubenswrapper[4834]: W0223 09:09:27.826688 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1172b9a5_71ca_49e9_a033_3b59c9c024a4.slice/crio-3f8ad4b0aed866a6c9313a81cbda97a5957e42d9204872d03552a1ca7879d7d5 WatchSource:0}: Error finding container 3f8ad4b0aed866a6c9313a81cbda97a5957e42d9204872d03552a1ca7879d7d5: Status 404 returned error can't find the container with id 3f8ad4b0aed866a6c9313a81cbda97a5957e42d9204872d03552a1ca7879d7d5 Feb 23 09:09:27 crc kubenswrapper[4834]: W0223 09:09:27.835801 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5a0627a9_533e_4723_b891_80dc7a5b611e.slice/crio-e65a3d163112bd81bd7430386ef77caf5771cf6fd0d009ce3c71a168cd2a9785 WatchSource:0}: Error finding container e65a3d163112bd81bd7430386ef77caf5771cf6fd0d009ce3c71a168cd2a9785: Status 404 returned error can't find the container with id e65a3d163112bd81bd7430386ef77caf5771cf6fd0d009ce3c71a168cd2a9785 Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.837263 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-x2c4z"] Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.838425 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.843059 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.844807 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.843273 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.843283 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.843511 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.843881 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.844108 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.847093 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.849090 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.849116 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.849135 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.849149 4834 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:27Z","lastTransitionTime":"2026-02-23T09:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.854502 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.872909 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.890225 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a0627a9-533e-4723-b891-80dc7a5b611e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gtpxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.912388 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5102eeec-7776-42da-8027-c4e5f9c13450\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art
-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\
\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-x2c4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.954913 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.954956 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.954970 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.954994 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.955009 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:27Z","lastTransitionTime":"2026-02-23T09:09:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.959833 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a44d4f04-0f7d-4c44-9dc0-1ecd80a10388\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f16a693a9ddada9fa2b8ef889011d4aa81ead7c66bc2245ecf1243371bd5e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:27 crc kubenswrapper[4834]: I0223 09:09:27.988171 4834 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.011638 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-ovn\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.011682 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-node-log\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.011715 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-log-socket\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.011735 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: 
\"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-run-netns\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.011812 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-env-overrides\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.011851 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5102eeec-7776-42da-8027-c4e5f9c13450-ovn-node-metrics-cert\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.011873 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.011945 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-slash\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.011964 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-systemd-units\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.011979 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-etc-openvswitch\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.012001 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-systemd\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.012015 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-cni-bin\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.012033 4834 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-kubelet\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.012051 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-openvswitch\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.012067 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-ovnkube-script-lib\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.012091 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-run-ovn-kubernetes\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.012105 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-cni-netd\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.012123 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-var-lib-openvswitch\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.012141 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-ovnkube-config\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.012158 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxvrj\" (UniqueName: \"kubernetes.io/projected/5102eeec-7776-42da-8027-c4e5f9c13450-kube-api-access-bxvrj\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.017465 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.025067 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" event={"ID":"1172b9a5-71ca-49e9-a033-3b59c9c024a4","Type":"ContainerStarted","Data":"5f0104afd41d2190f8d4b34e88bbfed12c35766c1fe0c2a0e109ae44cbac3345"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.025121 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" event={"ID":"1172b9a5-71ca-49e9-a033-3b59c9c024a4","Type":"ContainerStarted","Data":"3f8ad4b0aed866a6c9313a81cbda97a5957e42d9204872d03552a1ca7879d7d5"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.027504 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" event={"ID":"5a0627a9-533e-4723-b891-80dc7a5b611e","Type":"ContainerStarted","Data":"e65a3d163112bd81bd7430386ef77caf5771cf6fd0d009ce3c71a168cd2a9785"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.029702 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.029928 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-6ml2v" event={"ID":"4906a4ab-383b-45d4-a1e4-5a849346b4e6","Type":"ContainerStarted","Data":"5a3ef9573d98ad27a7330f3b10b7ee604e353073d66b7c29aa841cc79967dbc4"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.029956 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-6ml2v" event={"ID":"4906a4ab-383b-45d4-a1e4-5a849346b4e6","Type":"ContainerStarted","Data":"3609ac4ef6f44fda91d4470d67b8c480d555b6cce29ebe49806497b80cb1da04"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.034063 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-n556f" event={"ID":"56fcafd6-2c67-4f14-a43e-8a6cd12f012e","Type":"ContainerStarted","Data":"446599f5f5dc690f406caf10dc8da7f3834a353ad1fa1d37e4998a0bc247834d"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.034136 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-n556f" event={"ID":"56fcafd6-2c67-4f14-a43e-8a6cd12f012e","Type":"ContainerStarted","Data":"c66c1dac0f352d629c055018adfacd6d1f351afad35f17856e4ac0658b5bf0d1"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.039182 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6ml2v" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"4906a4ab-383b-45d4-a1e4-5a849346b4e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rssfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6ml2v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.049759 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1172b9a5-71ca-49e9-a033-3b59c9c024a4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-77qrb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-77qrb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt9lp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.057907 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.057944 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.057953 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.057972 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.057984 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:28Z","lastTransitionTime":"2026-02-23T09:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.068256 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f86d8e6-b3d5-402a-9f9b-568ac673d63c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe1ece38b380cdd99d8c323c271e3753fdacdbbfb65f9ea0f7d46a4b99443ea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://828b449e3bfa815231a28f30a1f8d4360fc21abd57f0f5fbb2297c41a7116189\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06c52566aed1d53aa400ebba7eb5642d90291f5e0a712fe950ba434a8bd2c342\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-23T09:08:54Z\\\",\\\"message\\\":\\\"le observer\\\\nW0223 09:08:54.279935 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0223 09:08:54.280187 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0223 09:08:54.281744 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1246126551/tls.crt::/tmp/serving-cert-1246126551/tls.key\\\\\\\"\\\\nI0223 09:08:54.648156 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0223 09:08:54.651138 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0223 09:08:54.651159 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0223 09:08:54.651179 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0223 09:08:54.651185 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0223 09:08:54.656437 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0223 09:08:54.656475 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0223 09:08:54.656536 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656560 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656581 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0223 09:08:54.656602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0223 09:08:54.656623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0223 09:08:54.656642 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0223 09:08:54.657691 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-23T09:08:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c99b854342f23f595ebbf00de97b11b8a4bbd92e1bbcfcb876dd8d8139f84b15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.082800 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:04Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99ab242a4e943c610bcfa7f3388d31dbb45813d765df811d4e5a1d1b0a2d1adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.098330 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n556f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqdgd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n556f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.112855 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-systemd-units\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.112900 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-etc-openvswitch\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.112923 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-systemd\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.112948 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-cni-bin\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.112969 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-kubelet\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.112986 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-openvswitch\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113016 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-ovnkube-script-lib\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113043 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-run-ovn-kubernetes\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113054 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-kubelet\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113126 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-openvswitch\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113116 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-systemd\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113163 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-cni-bin\") pod 
\"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113138 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-var-lib-openvswitch\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113065 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-var-lib-openvswitch\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113268 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-cni-netd\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113267 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-etc-openvswitch\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113293 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-ovnkube-config\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113313 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-cni-netd\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113316 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxvrj\" (UniqueName: \"kubernetes.io/projected/5102eeec-7776-42da-8027-c4e5f9c13450-kube-api-access-bxvrj\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113377 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-ovn\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113438 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-node-log\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113468 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-log-socket\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113517 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-run-netns\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113565 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-env-overrides\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113572 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-run-ovn-kubernetes\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113595 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5102eeec-7776-42da-8027-c4e5f9c13450-ovn-node-metrics-cert\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113761 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113820 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-slash\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113910 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-slash\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113945 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.112994 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-systemd-units\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113966 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-ovnkube-script-lib\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.113998 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-ovn\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.114016 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-run-netns\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.114031 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-node-log\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.114051 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-log-socket\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.114507 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-ovnkube-config\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.114842 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-env-overrides\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.117574 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5102eeec-7776-42da-8027-c4e5f9c13450-ovn-node-metrics-cert\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.117778 4834 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c5360b0-bcfe-49c7-99dd-87f4e16a1936\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad16faa9445f6f43d8e3f0fda566b1fde83ccbf5a60419cb21f48495cc29b9f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02ab00dc78b9f4a01120f4d13159010cd9e224980671aac02cf91612673f557a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f610cfd6d780a3e8882ec6cf11406234d20578fbf77ce9c9bd307e7da80c4691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"container
ID\\\":\\\"cri-o://748315cd1ff895c9c49ed28ebc47f96eab7f0bfa090ab092dddfcfcffbc4d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://616a1ea1355329722a38418fc7d3b3f6be94b6d0a505aba61d85c755873e5d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c5c7e127b8
5b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.130978 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n556f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://446599f5f5dc690f406caf10dc8da7f3834a353ad1fa1d37e4998a0bc247834d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"mult
us-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqdgd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n556f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.131363 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxvrj\" (UniqueName: \"kubernetes.io/projected/5102eeec-7776-42da-8027-c4e5f9c13450-kube-api-access-bxvrj\") pod \"ovnkube-node-x2c4z\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.149962 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c5360b0-bcfe-49c7-99dd-87f4e16a1936\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad16faa9445f6f43d8e3f0fda566b1fde83ccbf5a60419cb21f48495cc29b9f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02ab00dc78b9f4a01120f4d13159010cd9e224980671aac02cf91612673f557a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f610cfd6d780a3e8882ec6cf11406234d20578fbf77ce9c9bd307e7da80c4691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748315cd1ff895c9c49ed28ebc47f96eab7f0bf
a090ab092dddfcfcffbc4d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://616a1ea1355329722a38418fc7d3b3f6be94b6d0a505aba61d85c755873e5d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.160898 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a44d4f04-0f7d-4c44-9dc0-1ecd80a10388\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f16a693a9ddada9fa2b8ef889011d4aa81ead7c66bc2245ecf1243371bd5e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"image\\\":\\\"quay.io/opensh
ift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.161191 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.161216 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.161229 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.161246 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.161257 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:28Z","lastTransitionTime":"2026-02-23T09:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.174173 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.176855 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.187296 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: W0223 09:09:28.191266 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5102eeec_7776_42da_8027_c4e5f9c13450.slice/crio-8b1dbfd6a122f81ac3ef84ab31dc4c4371a67d615068efe958d96b78731381f2 WatchSource:0}: Error finding container 8b1dbfd6a122f81ac3ef84ab31dc4c4371a67d615068efe958d96b78731381f2: Status 404 returned error can't find the container with id 8b1dbfd6a122f81ac3ef84ab31dc4c4371a67d615068efe958d96b78731381f2 Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.202015 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a0627a9-533e-4723-b891-80dc7a5b611e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"
name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\
\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gtpxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.220377 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5102eeec-7776-42da-8027-c4e5f9c13450\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-x2c4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.233786 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f86d8e6-b3d5-402a-9f9b-568ac673d63c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe1ece38b380cdd99d8c323c271e3753fdacdbbfb65f9ea0f7d46a4b99443ea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://828b449e3bfa815231a28f30a1f8d4360fc21abd57f0f5fbb2297c41a7116189\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06c52566aed1d53aa400ebba7eb5642d90291f5e0a712fe950ba434a8bd2c342\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-23T09:08:54Z\\\",\\\"message\\\":\\\"le observer\\\\nW0223 09:08:54.279935 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0223 09:08:54.280187 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0223 09:08:54.281744 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1246126551/tls.crt::/tmp/serving-cert-1246126551/tls.key\\\\\\\"\\\\nI0223 09:08:54.648156 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0223 09:08:54.651138 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0223 09:08:54.651159 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0223 09:08:54.651179 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0223 09:08:54.651185 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0223 09:08:54.656437 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0223 09:08:54.656475 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0223 09:08:54.656536 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656560 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656581 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0223 09:08:54.656602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0223 09:08:54.656623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0223 09:08:54.656642 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0223 09:08:54.657691 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-23T09:08:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c99b854342f23f595ebbf00de97b11b8a4bbd92e1bbcfcb876dd8d8139f84b15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.251601 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:04Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99ab242a4e943c610bcfa7f3388d31dbb45813d765df811d4e5a1d1b0a2d1adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.263697 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.263732 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.263741 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.263756 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.263767 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:28Z","lastTransitionTime":"2026-02-23T09:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.264972 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.276205 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.294339 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.301809 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6ml2v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4906a4ab-383b-45d4-a1e4-5a849346b4e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a3ef9573d98ad27a7330f3b10b7ee604e353073d66b7c29aa841cc79967dbc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rssfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6ml2v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.310926 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1172b9a5-71ca-49e9-a033-3b59c9c024a4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-77qrb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-77qrb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt9lp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.366353 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.366415 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.366428 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.366446 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.366459 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:28Z","lastTransitionTime":"2026-02-23T09:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.469775 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.470233 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.470246 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.470263 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.470277 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:28Z","lastTransitionTime":"2026-02-23T09:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.573053 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.573095 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.573109 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.573129 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.573140 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:28Z","lastTransitionTime":"2026-02-23T09:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.594480 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-13 07:07:27.016963918 +0000 UTC Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.676876 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.676943 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.676962 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.676986 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.677002 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:28Z","lastTransitionTime":"2026-02-23T09:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.780712 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.780769 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.780779 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.780797 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.780809 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:28Z","lastTransitionTime":"2026-02-23T09:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.884064 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.884127 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.884141 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.884168 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.884183 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:28Z","lastTransitionTime":"2026-02-23T09:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.987122 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.987175 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.987188 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.987207 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:28 crc kubenswrapper[4834]: I0223 09:09:28.987220 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:28Z","lastTransitionTime":"2026-02-23T09:09:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.039244 4834 generic.go:334] "Generic (PLEG): container finished" podID="5102eeec-7776-42da-8027-c4e5f9c13450" containerID="4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f" exitCode=0 Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.039381 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerDied","Data":"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f"} Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.039563 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerStarted","Data":"8b1dbfd6a122f81ac3ef84ab31dc4c4371a67d615068efe958d96b78731381f2"} Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.041721 4834 generic.go:334] "Generic (PLEG): container finished" podID="5a0627a9-533e-4723-b891-80dc7a5b611e" containerID="9b9235cff879490e0bfdffbb26594e647268f6ece5006908dcb72437411960a8" exitCode=0 Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.041869 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" event={"ID":"5a0627a9-533e-4723-b891-80dc7a5b611e","Type":"ContainerDied","Data":"9b9235cff879490e0bfdffbb26594e647268f6ece5006908dcb72437411960a8"} Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.044952 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" event={"ID":"1172b9a5-71ca-49e9-a033-3b59c9c024a4","Type":"ContainerStarted","Data":"ebadb8ee2e8c4ae2652bb6c63206280526e9dca3784756c20614b4d396b41a60"} Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.060085 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f86d8e6-b3d5-402a-9f9b-568ac673d63c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe1ece38b380cdd99d8c323c271e3753fdacdbbfb65f9ea0f7d46a4b99443ea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://828b449e3bfa815231a28f30a1f8d4360fc21abd57f0f5fbb2297c41a7116189\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06c52566aed1d53aa400ebba7eb5642d90291f5e0a712fe950ba434a8bd2c342\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-23T09:08:54Z\\\",\\\"message\\\":\\\"le observer\\\\nW0223 09:08:54.279935 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0223 09:08:54.280187 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0223 09:08:54.281744 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1246126551/tls.crt::/tmp/serving-cert-1246126551/tls.key\\\\\\\"\\\\nI0223 09:08:54.648156 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0223 09:08:54.651138 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0223 09:08:54.651159 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0223 09:08:54.651179 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0223 09:08:54.651185 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0223 09:08:54.656437 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0223 09:08:54.656475 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0223 09:08:54.656536 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656560 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656581 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0223 09:08:54.656602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0223 09:08:54.656623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0223 09:08:54.656642 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0223 09:08:54.657691 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-23T09:08:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c99b854342f23f595ebbf00de97b11b8a4bbd92e1bbcfcb876dd8d8139f84b15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.072274 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:04Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99ab242a4e943c610bcfa7f3388d31dbb45813d765df811d4e5a1d1b0a2d1adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.086655 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.090781 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.090822 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.090834 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.090852 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.090864 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:29Z","lastTransitionTime":"2026-02-23T09:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.100655 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.111190 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.122264 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6ml2v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4906a4ab-383b-45d4-a1e4-5a849346b4e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a3ef9573d98ad27a7330f3b10b7ee604e353073d66b7c29aa841cc79967dbc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rssfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6ml2v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.135728 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1172b9a5-71ca-49e9-a033-3b59c9c024a4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-77qrb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-77qrb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt9lp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.147844 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n556f" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://446599f5f5dc690f406caf10dc8da7f3834a353ad1fa1d37e4998a0bc247834d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqdgd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n556f\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.167554 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c5360b0-bcfe-49c7-99dd-87f4e16a1936\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad16faa9445f6f43d8e3f0fda566b1fde83ccbf5a60419cb21f48495cc29b9f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02ab00dc78b9f4a01120f4d13159010cd9e224980671aac02cf91612673f557a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f610cfd6d780a3e8882ec6cf11406234d20578fbf77ce9c9bd307e7da80c4691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"sta
rted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748315cd1ff895c9c49ed28ebc47f96eab7f0bfa090ab092dddfcfcffbc4d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://616a1ea1355329722a38418fc7d3b3f6be94b6d0a505aba61d85c755873e5d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\"
:{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.179097 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a44d4f04-0f7d-4c44-9dc0-1ecd80a10388\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f16a693a9ddada9fa2b8ef889011d4aa81ead7c66bc2245ecf1243371bd5e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.191503 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.193919 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.194442 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.194456 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.194473 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.194485 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:29Z","lastTransitionTime":"2026-02-23T09:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.204374 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.219887 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a0627a9-533e-4723-b891-80dc7a5b611e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plu
gin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gtpxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 
23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.238160 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5102eeec-7776-42da-8027-c4e5f9c13450\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"r
eason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Dis
abled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\\\",\\\"image\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:09:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-x2c4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.252764 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n556f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://446599f5f5dc690f406caf10dc8da7f3834a353ad1fa1d37e4998a0bc247834d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\"
:\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqdgd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n556f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.271916 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c5360b0-bcfe-49c7-99dd-87f4e16a1936\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad16faa9445f6f43d8e3f0fda566b1fde83ccbf5a60419cb21f48495cc29b9f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02ab00dc78b9f4a01120f4d13159010cd9e224980671aac02cf91612673f557a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f610cfd6d780a3e8882ec6cf11406234d20578fbf77ce9c9bd307e7da80c4691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748315cd1ff895c9c49ed28ebc47f96eab7f0bf
a090ab092dddfcfcffbc4d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://616a1ea1355329722a38418fc7d3b3f6be94b6d0a505aba61d85c755873e5d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.283532 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.296807 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a0627a9-533e-4723-b891-80dc7a5b611e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9235cff879490e0bfdffbb26594e647268f6ece5006908dcb72437411960a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b9235cff879490e0bfdffbb26594e647268f6ece5006908dcb72437411960a8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:09:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gtpxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.297059 4834 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.297103 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.297132 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.297152 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.297163 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:29Z","lastTransitionTime":"2026-02-23T09:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.317263 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5102eeec-7776-42da-8027-c4e5f9c13450\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:09:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-x2c4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.326317 
4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a44d4f04-0f7d-4c44-9dc0-1ecd80a10388\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f16a693a9ddada9fa2b8ef889011d4aa81ead7c66bc2245ecf1243371bd5e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.336977 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.349719 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.361645 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.371971 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6ml2v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4906a4ab-383b-45d4-a1e4-5a849346b4e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a3ef9573d98ad27a7330f3b10b7ee604e353073d66b7c29aa841cc79967dbc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rssfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6ml2v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.382906 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1172b9a5-71ca-49e9-a033-3b59c9c024a4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebadb8ee2e8c4ae2652bb6c63206280526e9dca3784756c20614b4d396b41a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-77qrb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f0104afd41d2190f8d4b34e88bbfed12c35766c1fe0c2a0e109ae44cbac3345\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-77qrb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt9lp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.398053 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f86d8e6-b3d5-402a-9f9b-568ac673d63c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe1ece38b380cdd99d8c323c271e3753fdacdbbfb65f9ea0f7d46a4b99443ea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://828b449e3bfa815231a28f30a1f8d4360fc21abd57f0f5fbb2297c41a7116189\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06c52566aed1d53aa400ebba7eb5642d90291f5e0a712fe950ba434a8bd2c342\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-23T09:08:54Z\\\",\\\"message\\\":\\\"le observer\\\\nW0223 09:08:54.279935 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0223 09:08:54.280187 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0223 09:08:54.281744 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1246126551/tls.crt::/tmp/serving-cert-1246126551/tls.key\\\\\\\"\\\\nI0223 09:08:54.648156 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0223 09:08:54.651138 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0223 09:08:54.651159 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0223 09:08:54.651179 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0223 09:08:54.651185 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0223 09:08:54.656437 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0223 09:08:54.656475 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0223 09:08:54.656536 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656560 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656581 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0223 09:08:54.656602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0223 09:08:54.656623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0223 09:08:54.656642 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0223 09:08:54.657691 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-23T09:08:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c99b854342f23f595ebbf00de97b11b8a4bbd92e1bbcfcb876dd8d8139f84b15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.404241 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.404289 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.404303 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.404323 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.404338 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:29Z","lastTransitionTime":"2026-02-23T09:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.412080 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:04Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99ab242a4e943c610bcfa7f3388d31dbb45813d765df811d4e5a1d1b0a2d1adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.424007 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.507880 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.507929 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.507947 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.507970 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.507981 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:29Z","lastTransitionTime":"2026-02-23T09:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.584630 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.584682 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.584902 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:29 crc kubenswrapper[4834]: E0223 09:09:29.585154 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:29 crc kubenswrapper[4834]: E0223 09:09:29.585230 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:29 crc kubenswrapper[4834]: E0223 09:09:29.585372 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.595459 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 11:55:52.69171524 +0000 UTC Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.611232 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.611270 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.611282 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.611299 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.611311 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:29Z","lastTransitionTime":"2026-02-23T09:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.714773 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.714818 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.714828 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.714850 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.714863 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:29Z","lastTransitionTime":"2026-02-23T09:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.818138 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.818590 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.818600 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.818615 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.818626 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:29Z","lastTransitionTime":"2026-02-23T09:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.921751 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.921798 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.921811 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.921831 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:29 crc kubenswrapper[4834]: I0223 09:09:29.921842 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:29Z","lastTransitionTime":"2026-02-23T09:09:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.025264 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.025300 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.025310 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.025327 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.025339 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:30Z","lastTransitionTime":"2026-02-23T09:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.051331 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"1c6547ce79b67722a3eca4195fc6dbd081ff16e9a861ca95746d91724cf90e7f"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.052256 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"78ed73f0167a0971ec780df235feb375b78b2a50ca31e28cc4b0bb2862d06437"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.066726 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerStarted","Data":"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.066779 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerStarted","Data":"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.066791 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerStarted","Data":"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.066802 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerStarted","Data":"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.071222 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" event={"ID":"5a0627a9-533e-4723-b891-80dc7a5b611e","Type":"ContainerStarted","Data":"cc0fada4b3bd5343eb1d3bd72ee0dc9adab75b3d0bb7a9d7ddd6627d1e7b146f"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 
09:09:30.078525 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0c5360b0-bcfe-49c7-99dd-87f4e16a1936\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:08:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ad16faa9445f6f43d8e3f0fda566b1fde83ccbf5a60419cb21f48495cc29b9f4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://02ab00dc78b9f4a01120f4d13159010cd9e224980671aac02cf91612673f557a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://f610cfd6d780a3e8882ec6cf11406234d20578fbf77ce9c9bd307e7da80c4691\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://748315cd1ff895c9c49ed28ebc47f96eab7f0bfa090ab092dddfcfcffbc4d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://616a1ea1355329722a38418fc7d3b3f6be94b6d0a505aba61d85c755873e5d4c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b20d5f16955287ef901690dda54247a2158f84eb929c5c943f63da36f6272b2e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3f6dd4c3b78c0c7dbfef7b071d430d28b0146072c289b6d845ad2627eae495a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\
\":\\\"2026-02-23T09:07:46Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c5c7e127b85b40e633c7724328dfd7f757e10793458662ddc1bc1ec7824be3a8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.090771 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.104768 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5a0627a9-533e-4723-b891-80dc7a5b611e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9b9235cff879490e0bfdffbb26594e647268f6ece5006908dcb72437411960a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9b9235cff879490e0bfdffbb26594e647268f6ece5006908dcb72437411960a8\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:09:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bqkkr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-gtpxs\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.127292 4834 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5102eeec-7776-42da-8027-c4e5f9c13450\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"o
vnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77
3257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\
\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:09:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bxvrj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-x2c4z\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.128265 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.128309 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.128323 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.128347 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.128366 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:30Z","lastTransitionTime":"2026-02-23T09:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.137278 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a44d4f04-0f7d-4c44-9dc0-1ecd80a10388\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3f16a693a9ddada9fa2b8ef889011d4aa81ead7c66bc2245ecf1243371bd5e74\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://21a03d312435acdadc37959ff6b02513d366b1fa710d758ce2b030fe535cbab9\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.150700 4834 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.163028 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1c6547ce79b67722a3eca4195fc6dbd081ff16e9a861ca95746d91724cf90e7f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://78ed73f0167a0971ec780df235feb375b78b2a50ca31e28cc4b0bb2862d06437\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.174560 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"services have not yet been read at least once, cannot construct envvars\\\",\\\"reason\\\":\\\"CreateContainerConfigError\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.182764 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-6ml2v" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"4906a4ab-383b-45d4-a1e4-5a849346b4e6\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5a3ef9573d98ad27a7330f3b10b7ee604e353073d66b7c29aa841cc79967dbc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rssfs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-6ml2v\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.194145 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"1172b9a5-71ca-49e9-a033-3b59c9c024a4\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ebadb8ee2e8c4ae2652bb6c63206280526e9dca3784756c20614b4d396b41a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-77qrb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5f0104afd41d2190f8d4b34e88bbfed12c35766c1fe0c2a0e109ae44cbac3345\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-77qrb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-kt9lp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.206956 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch 
status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"1f86d8e6-b3d5-402a-9f9b-568ac673d63c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:07:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://fe1ece38b380cdd99d8c323c271e3753fdacdbbfb65f9ea0f7d46a4b99443ea7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://828b449e3bfa815231a28f30a1f8d4360fc21abd57f0f5fbb2297c41a7116189\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://06c52566aed1d53aa400ebba7eb5642d90291f5e0a712fe950ba434a8bd2c342\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}
,{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-02-23T09:08:54Z\\\",\\\"message\\\":\\\"le observer\\\\nW0223 09:08:54.279935 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI0223 09:08:54.280187 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0223 09:08:54.281744 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1246126551/tls.crt::/tmp/serving-cert-1246126551/tls.key\\\\\\\"\\\\nI0223 09:08:54.648156 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0223 09:08:54.651138 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0223 09:08:54.651159 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0223 09:08:54.651179 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0223 09:08:54.651185 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0223 09:08:54.656437 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI0223 09:08:54.656475 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW0223 09:08:54.656536 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656560 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0223 09:08:54.656581 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0223 09:08:54.656602 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0223 09:08:54.656623 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0223 09:08:54.656642 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF0223 09:08:54.657691 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-02-23T09:08:53Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c99b854342f23f595ebbf00de97b11b8a4bbd92e1bbcfcb876dd8d8139f84b15\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:07:47Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-02-23T09:07:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-02-23T09:07:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:07:44Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.219724 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:04Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:21Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://99ab242a4e943c610bcfa7f3388d31dbb45813d765df811d4e5a1d1b0a2d1adc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:20Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.231040 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:03Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.232441 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.232484 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.232496 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.232513 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.232527 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:30Z","lastTransitionTime":"2026-02-23T09:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.243824 4834 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-n556f" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"56fcafd6-2c67-4f14-a43e-8a6cd12f012e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-02-23T09:09:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://446599f5f5dc690f406caf10dc8da7f3834a353ad1fa1d37e4998a0bc247834d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-02-23T09:09:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pqdgd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-02-23T09:09:27Z\\\"}}\" for pod \"openshift-multus\"/\"multus-n556f\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.263822 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-6ml2v" podStartSLOduration=39.26379769 podStartE2EDuration="39.26379769s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:30.263526843 +0000 UTC m=+106.341841230" watchObservedRunningTime="2026-02-23 09:09:30.26379769 +0000 UTC m=+106.342112067" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.299150 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podStartSLOduration=39.299117998 podStartE2EDuration="39.299117998s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:30.277720196 +0000 UTC m=+106.356034583" watchObservedRunningTime="2026-02-23 09:09:30.299117998 +0000 UTC m=+106.377432385" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.334860 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.334943 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.334967 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.334997 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.335024 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:30Z","lastTransitionTime":"2026-02-23T09:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.378238 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-n556f" podStartSLOduration=39.37820767 podStartE2EDuration="39.37820767s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:30.377820599 +0000 UTC m=+106.456134996" watchObservedRunningTime="2026-02-23 09:09:30.37820767 +0000 UTC m=+106.456522057" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.409105 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=18.409082885 podStartE2EDuration="18.409082885s" podCreationTimestamp="2026-02-23 09:09:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:30.407328217 +0000 UTC m=+106.485642624" watchObservedRunningTime="2026-02-23 09:09:30.409082885 +0000 UTC m=+106.487397272" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.437478 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.437518 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.437531 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.437548 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.437563 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:30Z","lastTransitionTime":"2026-02-23T09:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.459016 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=17.458979528 podStartE2EDuration="17.458979528s" podCreationTimestamp="2026-02-23 09:09:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:30.444521688 +0000 UTC m=+106.522836075" watchObservedRunningTime="2026-02-23 09:09:30.458979528 +0000 UTC m=+106.537293955" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.462666 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-jstkv"] Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.463075 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-jstkv" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.465075 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.466578 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.467338 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.468140 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.540376 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.540438 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.540455 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.540477 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.540493 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:30Z","lastTransitionTime":"2026-02-23T09:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.541723 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3cc4bc05-d623-4a1e-879a-773a30d94a11-host\") pod \"node-ca-jstkv\" (UID: \"3cc4bc05-d623-4a1e-879a-773a30d94a11\") " pod="openshift-image-registry/node-ca-jstkv" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.541788 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cc4bc05-d623-4a1e-879a-773a30d94a11-serviceca\") pod \"node-ca-jstkv\" (UID: \"3cc4bc05-d623-4a1e-879a-773a30d94a11\") " pod="openshift-image-registry/node-ca-jstkv" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.541846 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vdpr2\" (UniqueName: \"kubernetes.io/projected/3cc4bc05-d623-4a1e-879a-773a30d94a11-kube-api-access-vdpr2\") pod \"node-ca-jstkv\" (UID: \"3cc4bc05-d623-4a1e-879a-773a30d94a11\") " pod="openshift-image-registry/node-ca-jstkv" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.596643 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 22:24:17.825882315 +0000 UTC Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.614765 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx"] Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.615377 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.617014 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.617991 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.633562 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-nzcfx"] Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.634161 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:30 crc kubenswrapper[4834]: E0223 09:09:30.634240 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nzcfx" podUID="4ceb8401-3a07-422f-ae4d-14366611f4a6" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.642383 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cc4bc05-d623-4a1e-879a-773a30d94a11-serviceca\") pod \"node-ca-jstkv\" (UID: \"3cc4bc05-d623-4a1e-879a-773a30d94a11\") " pod="openshift-image-registry/node-ca-jstkv" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.642505 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/495eec18-4e3d-4a65-805e-48fc2c966626-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-zjmkx\" (UID: \"495eec18-4e3d-4a65-805e-48fc2c966626\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.642536 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vdpr2\" (UniqueName: \"kubernetes.io/projected/3cc4bc05-d623-4a1e-879a-773a30d94a11-kube-api-access-vdpr2\") pod \"node-ca-jstkv\" (UID: \"3cc4bc05-d623-4a1e-879a-773a30d94a11\") " pod="openshift-image-registry/node-ca-jstkv" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.642563 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/495eec18-4e3d-4a65-805e-48fc2c966626-env-overrides\") pod \"ovnkube-control-plane-749d76644c-zjmkx\" (UID: \"495eec18-4e3d-4a65-805e-48fc2c966626\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.642596 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs\") pod \"network-metrics-daemon-nzcfx\" (UID: \"4ceb8401-3a07-422f-ae4d-14366611f4a6\") " pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.642619 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3cc4bc05-d623-4a1e-879a-773a30d94a11-host\") pod \"node-ca-jstkv\" (UID: \"3cc4bc05-d623-4a1e-879a-773a30d94a11\") " pod="openshift-image-registry/node-ca-jstkv" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.642644 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dvc7c\" (UniqueName: \"kubernetes.io/projected/495eec18-4e3d-4a65-805e-48fc2c966626-kube-api-access-dvc7c\") pod \"ovnkube-control-plane-749d76644c-zjmkx\" (UID: \"495eec18-4e3d-4a65-805e-48fc2c966626\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.642674 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/495eec18-4e3d-4a65-805e-48fc2c966626-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-zjmkx\" (UID: \"495eec18-4e3d-4a65-805e-48fc2c966626\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.642695 4834 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqx6w\" (UniqueName: \"kubernetes.io/projected/4ceb8401-3a07-422f-ae4d-14366611f4a6-kube-api-access-zqx6w\") pod \"network-metrics-daemon-nzcfx\" (UID: \"4ceb8401-3a07-422f-ae4d-14366611f4a6\") " pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.642801 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/3cc4bc05-d623-4a1e-879a-773a30d94a11-host\") pod \"node-ca-jstkv\" (UID: \"3cc4bc05-d623-4a1e-879a-773a30d94a11\") " pod="openshift-image-registry/node-ca-jstkv" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.643609 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cc4bc05-d623-4a1e-879a-773a30d94a11-serviceca\") pod \"node-ca-jstkv\" (UID: \"3cc4bc05-d623-4a1e-879a-773a30d94a11\") " pod="openshift-image-registry/node-ca-jstkv" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.644071 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.644133 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.644153 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.644176 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.644190 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:30Z","lastTransitionTime":"2026-02-23T09:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.662514 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vdpr2\" (UniqueName: \"kubernetes.io/projected/3cc4bc05-d623-4a1e-879a-773a30d94a11-kube-api-access-vdpr2\") pod \"node-ca-jstkv\" (UID: \"3cc4bc05-d623-4a1e-879a-773a30d94a11\") " pod="openshift-image-registry/node-ca-jstkv" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.743472 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dvc7c\" (UniqueName: \"kubernetes.io/projected/495eec18-4e3d-4a65-805e-48fc2c966626-kube-api-access-dvc7c\") pod \"ovnkube-control-plane-749d76644c-zjmkx\" (UID: \"495eec18-4e3d-4a65-805e-48fc2c966626\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.743553 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/495eec18-4e3d-4a65-805e-48fc2c966626-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-zjmkx\" (UID: \"495eec18-4e3d-4a65-805e-48fc2c966626\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.743583 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqx6w\" (UniqueName: \"kubernetes.io/projected/4ceb8401-3a07-422f-ae4d-14366611f4a6-kube-api-access-zqx6w\") pod \"network-metrics-daemon-nzcfx\" (UID: \"4ceb8401-3a07-422f-ae4d-14366611f4a6\") " pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.743636 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/495eec18-4e3d-4a65-805e-48fc2c966626-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-zjmkx\" (UID: \"495eec18-4e3d-4a65-805e-48fc2c966626\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.743663 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/495eec18-4e3d-4a65-805e-48fc2c966626-env-overrides\") pod \"ovnkube-control-plane-749d76644c-zjmkx\" (UID: \"495eec18-4e3d-4a65-805e-48fc2c966626\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.743688 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs\") pod \"network-metrics-daemon-nzcfx\" (UID: \"4ceb8401-3a07-422f-ae4d-14366611f4a6\") " pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:30 crc kubenswrapper[4834]: E0223 09:09:30.743811 4834 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 23 09:09:30 crc kubenswrapper[4834]: E0223 09:09:30.743882 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs podName:4ceb8401-3a07-422f-ae4d-14366611f4a6 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:31.243856832 +0000 UTC m=+107.322171219 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs") pod "network-metrics-daemon-nzcfx" (UID: "4ceb8401-3a07-422f-ae4d-14366611f4a6") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.744333 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/495eec18-4e3d-4a65-805e-48fc2c966626-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-zjmkx\" (UID: \"495eec18-4e3d-4a65-805e-48fc2c966626\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.744848 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/495eec18-4e3d-4a65-805e-48fc2c966626-env-overrides\") pod \"ovnkube-control-plane-749d76644c-zjmkx\" (UID: \"495eec18-4e3d-4a65-805e-48fc2c966626\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.747074 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.747108 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.747117 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.747136 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.747147 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:30Z","lastTransitionTime":"2026-02-23T09:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.747179 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/495eec18-4e3d-4a65-805e-48fc2c966626-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-zjmkx\" (UID: \"495eec18-4e3d-4a65-805e-48fc2c966626\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.763795 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqx6w\" (UniqueName: \"kubernetes.io/projected/4ceb8401-3a07-422f-ae4d-14366611f4a6-kube-api-access-zqx6w\") pod \"network-metrics-daemon-nzcfx\" (UID: \"4ceb8401-3a07-422f-ae4d-14366611f4a6\") " pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.764625 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dvc7c\" (UniqueName: \"kubernetes.io/projected/495eec18-4e3d-4a65-805e-48fc2c966626-kube-api-access-dvc7c\") pod \"ovnkube-control-plane-749d76644c-zjmkx\" (UID: \"495eec18-4e3d-4a65-805e-48fc2c966626\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.780013 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-jstkv" Feb 23 09:09:30 crc kubenswrapper[4834]: W0223 09:09:30.794159 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3cc4bc05_d623_4a1e_879a_773a30d94a11.slice/crio-8dac7c0a5704067757f088703a2339ac2020f9bbee5d4238730d45fb09084ffb WatchSource:0}: Error finding container 8dac7c0a5704067757f088703a2339ac2020f9bbee5d4238730d45fb09084ffb: Status 404 returned error can't find the container with id 8dac7c0a5704067757f088703a2339ac2020f9bbee5d4238730d45fb09084ffb Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.850805 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.850868 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.850890 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.850918 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.850939 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:30Z","lastTransitionTime":"2026-02-23T09:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.929860 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" Feb 23 09:09:30 crc kubenswrapper[4834]: W0223 09:09:30.953473 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod495eec18_4e3d_4a65_805e_48fc2c966626.slice/crio-823c158b70fa685218c386639df1f291af42189ead844e6e6d893cb30ccdbf0b WatchSource:0}: Error finding container 823c158b70fa685218c386639df1f291af42189ead844e6e6d893cb30ccdbf0b: Status 404 returned error can't find the container with id 823c158b70fa685218c386639df1f291af42189ead844e6e6d893cb30ccdbf0b Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.953745 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.953779 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.953792 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.953814 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:30 crc kubenswrapper[4834]: I0223 09:09:30.953830 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:30Z","lastTransitionTime":"2026-02-23T09:09:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.056963 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.056995 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.057005 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.057019 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.057029 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:31Z","lastTransitionTime":"2026-02-23T09:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.078368 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" event={"ID":"495eec18-4e3d-4a65-805e-48fc2c966626","Type":"ContainerStarted","Data":"823c158b70fa685218c386639df1f291af42189ead844e6e6d893cb30ccdbf0b"} Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.082431 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-jstkv" event={"ID":"3cc4bc05-d623-4a1e-879a-773a30d94a11","Type":"ContainerStarted","Data":"a2cf70a7f24dcbd1d9cb9ee41c21372a437357339d4001e78994f9e04c20aacb"} Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.082542 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-jstkv" event={"ID":"3cc4bc05-d623-4a1e-879a-773a30d94a11","Type":"ContainerStarted","Data":"8dac7c0a5704067757f088703a2339ac2020f9bbee5d4238730d45fb09084ffb"} Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.089019 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerStarted","Data":"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6"} Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.089070 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerStarted","Data":"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023"} Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.091847 4834 generic.go:334] "Generic (PLEG): container finished" podID="5a0627a9-533e-4723-b891-80dc7a5b611e" containerID="cc0fada4b3bd5343eb1d3bd72ee0dc9adab75b3d0bb7a9d7ddd6627d1e7b146f" exitCode=0 Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.091890 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" event={"ID":"5a0627a9-533e-4723-b891-80dc7a5b611e","Type":"ContainerDied","Data":"cc0fada4b3bd5343eb1d3bd72ee0dc9adab75b3d0bb7a9d7ddd6627d1e7b146f"} Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.100536 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-jstkv" podStartSLOduration=40.100503555 podStartE2EDuration="40.100503555s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:31.100355351 +0000 UTC m=+107.178669738" watchObservedRunningTime="2026-02-23 09:09:31.100503555 +0000 UTC m=+107.178817962" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.170674 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.170722 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.170732 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.170750 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.170760 4834 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:31Z","lastTransitionTime":"2026-02-23T09:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.191900 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.191931 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.191941 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.191955 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.191966 4834 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-02-23T09:09:31Z","lastTransitionTime":"2026-02-23T09:09:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.241265 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql"] Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.241745 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.243483 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.243770 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.244056 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.244269 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.252870 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f3103cc-ec41-4acd-9229-c77a05120d6a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.252914 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1f3103cc-ec41-4acd-9229-c77a05120d6a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.252939 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1f3103cc-ec41-4acd-9229-c77a05120d6a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.252989 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1f3103cc-ec41-4acd-9229-c77a05120d6a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.253023 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs\") pod \"network-metrics-daemon-nzcfx\" (UID: \"4ceb8401-3a07-422f-ae4d-14366611f4a6\") " pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.253080 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1f3103cc-ec41-4acd-9229-c77a05120d6a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: E0223 09:09:31.253139 
4834 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 23 09:09:31 crc kubenswrapper[4834]: E0223 09:09:31.253204 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs podName:4ceb8401-3a07-422f-ae4d-14366611f4a6 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:32.253183726 +0000 UTC m=+108.331498113 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs") pod "network-metrics-daemon-nzcfx" (UID: "4ceb8401-3a07-422f-ae4d-14366611f4a6") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.354617 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f3103cc-ec41-4acd-9229-c77a05120d6a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.354662 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1f3103cc-ec41-4acd-9229-c77a05120d6a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.354694 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1f3103cc-ec41-4acd-9229-c77a05120d6a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.354741 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1f3103cc-ec41-4acd-9229-c77a05120d6a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.354805 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1f3103cc-ec41-4acd-9229-c77a05120d6a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.354838 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/1f3103cc-ec41-4acd-9229-c77a05120d6a-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.354893 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/1f3103cc-ec41-4acd-9229-c77a05120d6a-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.355747 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/1f3103cc-ec41-4acd-9229-c77a05120d6a-service-ca\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.360299 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1f3103cc-ec41-4acd-9229-c77a05120d6a-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.370646 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1f3103cc-ec41-4acd-9229-c77a05120d6a-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-9kvql\" (UID: \"1f3103cc-ec41-4acd-9229-c77a05120d6a\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.557973 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" Feb 23 09:09:31 crc kubenswrapper[4834]: W0223 09:09:31.575736 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1f3103cc_ec41_4acd_9229_c77a05120d6a.slice/crio-0daea6e938ceb41394ced7838dd0e8a9c8863c873713539358bb7610eb844bf8 WatchSource:0}: Error finding container 0daea6e938ceb41394ced7838dd0e8a9c8863c873713539358bb7610eb844bf8: Status 404 returned error can't find the container with id 0daea6e938ceb41394ced7838dd0e8a9c8863c873713539358bb7610eb844bf8 Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.584653 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.584722 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.584649 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:31 crc kubenswrapper[4834]: E0223 09:09:31.584803 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:31 crc kubenswrapper[4834]: E0223 09:09:31.584952 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:31 crc kubenswrapper[4834]: E0223 09:09:31.585115 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.597565 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-20 03:57:42.105477478 +0000 UTC Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.597642 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates Feb 23 09:09:31 crc kubenswrapper[4834]: I0223 09:09:31.606531 4834 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Feb 23 09:09:32 crc kubenswrapper[4834]: I0223 09:09:32.100712 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" event={"ID":"495eec18-4e3d-4a65-805e-48fc2c966626","Type":"ContainerStarted","Data":"0edea7c717a3a02c6e3ca484ddcbf6e2866537ce722403d6323a9872d0cddd12"} Feb 23 09:09:32 crc kubenswrapper[4834]: I0223 09:09:32.100795 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" event={"ID":"495eec18-4e3d-4a65-805e-48fc2c966626","Type":"ContainerStarted","Data":"05f01dbca1949fc4a6ae2f6603d0554ef7dcfe6344abe2b55d9b1cbceee5711a"} Feb 23 09:09:32 crc kubenswrapper[4834]: I0223 09:09:32.104658 4834 generic.go:334] "Generic (PLEG): container finished" podID="5a0627a9-533e-4723-b891-80dc7a5b611e" containerID="e51ec0a3d29b483ab4caca7734ecff948c9141a9a5e1cbba1a25bf2aeb0571db" exitCode=0 Feb 23 09:09:32 crc kubenswrapper[4834]: I0223 09:09:32.104748 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" event={"ID":"5a0627a9-533e-4723-b891-80dc7a5b611e","Type":"ContainerDied","Data":"e51ec0a3d29b483ab4caca7734ecff948c9141a9a5e1cbba1a25bf2aeb0571db"} Feb 23 09:09:32 crc kubenswrapper[4834]: I0223 09:09:32.106972 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" event={"ID":"1f3103cc-ec41-4acd-9229-c77a05120d6a","Type":"ContainerStarted","Data":"b8ec27877bcbb9ad1b0c77417b329995e4365ae919930e99fd0b9ac46b5e2b52"} Feb 23 09:09:32 crc kubenswrapper[4834]: I0223 09:09:32.107011 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" 
event={"ID":"1f3103cc-ec41-4acd-9229-c77a05120d6a","Type":"ContainerStarted","Data":"0daea6e938ceb41394ced7838dd0e8a9c8863c873713539358bb7610eb844bf8"} Feb 23 09:09:32 crc kubenswrapper[4834]: I0223 09:09:32.165948 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-zjmkx" podStartSLOduration=40.165920759 podStartE2EDuration="40.165920759s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:32.137325476 +0000 UTC m=+108.215639873" watchObservedRunningTime="2026-02-23 09:09:32.165920759 +0000 UTC m=+108.244235176" Feb 23 09:09:32 crc kubenswrapper[4834]: I0223 09:09:32.166681 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-9kvql" podStartSLOduration=41.166672199 podStartE2EDuration="41.166672199s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:32.162550295 +0000 UTC m=+108.240864712" watchObservedRunningTime="2026-02-23 09:09:32.166672199 +0000 UTC m=+108.244986626" Feb 23 09:09:32 crc kubenswrapper[4834]: I0223 09:09:32.264631 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs\") pod \"network-metrics-daemon-nzcfx\" (UID: \"4ceb8401-3a07-422f-ae4d-14366611f4a6\") " pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:32 crc kubenswrapper[4834]: E0223 09:09:32.264822 4834 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 23 09:09:32 crc kubenswrapper[4834]: E0223 09:09:32.264890 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs podName:4ceb8401-3a07-422f-ae4d-14366611f4a6 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:34.26486687 +0000 UTC m=+110.343181257 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs") pod "network-metrics-daemon-nzcfx" (UID: "4ceb8401-3a07-422f-ae4d-14366611f4a6") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 23 09:09:32 crc kubenswrapper[4834]: I0223 09:09:32.584282 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:32 crc kubenswrapper[4834]: E0223 09:09:32.584460 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nzcfx" podUID="4ceb8401-3a07-422f-ae4d-14366611f4a6" Feb 23 09:09:33 crc kubenswrapper[4834]: I0223 09:09:33.117029 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerStarted","Data":"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529"} Feb 23 09:09:33 crc kubenswrapper[4834]: I0223 09:09:33.121675 4834 generic.go:334] "Generic (PLEG): container finished" podID="5a0627a9-533e-4723-b891-80dc7a5b611e" containerID="6e2dc9d4b12b87c747169facc6d99c149b8ec3595452cf410265f30142ef6972" exitCode=0 Feb 23 09:09:33 crc kubenswrapper[4834]: I0223 09:09:33.121863 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" event={"ID":"5a0627a9-533e-4723-b891-80dc7a5b611e","Type":"ContainerDied","Data":"6e2dc9d4b12b87c747169facc6d99c149b8ec3595452cf410265f30142ef6972"} Feb 23 09:09:33 crc kubenswrapper[4834]: I0223 09:09:33.585062 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:33 crc kubenswrapper[4834]: E0223 09:09:33.585781 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:33 crc kubenswrapper[4834]: I0223 09:09:33.585576 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:33 crc kubenswrapper[4834]: I0223 09:09:33.585553 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:33 crc kubenswrapper[4834]: E0223 09:09:33.585919 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:33 crc kubenswrapper[4834]: E0223 09:09:33.586004 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:34 crc kubenswrapper[4834]: I0223 09:09:34.130021 4834 generic.go:334] "Generic (PLEG): container finished" podID="5a0627a9-533e-4723-b891-80dc7a5b611e" containerID="1d24f2696b822b40af4f8b292f140f0de54be9cb04d531e5d4e98749b517fe4b" exitCode=0 Feb 23 09:09:34 crc kubenswrapper[4834]: I0223 09:09:34.130121 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" event={"ID":"5a0627a9-533e-4723-b891-80dc7a5b611e","Type":"ContainerDied","Data":"1d24f2696b822b40af4f8b292f140f0de54be9cb04d531e5d4e98749b517fe4b"} Feb 23 09:09:34 crc kubenswrapper[4834]: I0223 09:09:34.295186 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs\") pod \"network-metrics-daemon-nzcfx\" (UID: \"4ceb8401-3a07-422f-ae4d-14366611f4a6\") " pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:34 crc kubenswrapper[4834]: E0223 09:09:34.295636 4834 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 23 09:09:34 crc kubenswrapper[4834]: E0223 09:09:34.295721 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs podName:4ceb8401-3a07-422f-ae4d-14366611f4a6 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:38.295691015 +0000 UTC m=+114.374005402 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs") pod "network-metrics-daemon-nzcfx" (UID: "4ceb8401-3a07-422f-ae4d-14366611f4a6") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 23 09:09:34 crc kubenswrapper[4834]: I0223 09:09:34.585910 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:34 crc kubenswrapper[4834]: E0223 09:09:34.586023 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-nzcfx" podUID="4ceb8401-3a07-422f-ae4d-14366611f4a6" Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.148714 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerStarted","Data":"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6"} Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.149211 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.153881 4834 generic.go:334] "Generic (PLEG): container finished" podID="5a0627a9-533e-4723-b891-80dc7a5b611e" containerID="77f861851ee2e4bd61c36f98c763f6e28af01a042c86a24d7e22767880ddff6a" exitCode=0 Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.153997 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" event={"ID":"5a0627a9-533e-4723-b891-80dc7a5b611e","Type":"ContainerDied","Data":"77f861851ee2e4bd61c36f98c763f6e28af01a042c86a24d7e22767880ddff6a"} Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.192033 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" podStartSLOduration=43.192005673 podStartE2EDuration="43.192005673s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:35.189558345 +0000 UTC m=+111.267872772" watchObservedRunningTime="2026-02-23 09:09:35.192005673 +0000 UTC m=+111.270320070" Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.197700 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.507579 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.507763 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.507795 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:10:07.507755383 +0000 UTC m=+143.586069780 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.507865 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.507912 4834 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.507919 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.507960 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.507982 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-23 09:10:07.507960068 +0000 UTC m=+143.586274455 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.508003 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.508022 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.508034 4834 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.508065 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-02-23 09:10:07.508057131 +0000 UTC m=+143.586371518 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.508209 4834 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.508222 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.508388 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-02-23 09:10:07.508352889 +0000 UTC m=+143.586667316 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.508412 4834 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.508469 4834 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.508537 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-02-23 09:10:07.508522444 +0000 UTC m=+143.586836881 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.584625 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.584694 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.584643 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.584779 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.584837 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:35 crc kubenswrapper[4834]: E0223 09:09:35.585342 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:35 crc kubenswrapper[4834]: I0223 09:09:35.585569 4834 scope.go:117] "RemoveContainer" containerID="081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d" Feb 23 09:09:36 crc kubenswrapper[4834]: I0223 09:09:36.171913 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" event={"ID":"5a0627a9-533e-4723-b891-80dc7a5b611e","Type":"ContainerStarted","Data":"4ecbc30cfa1fac6f4d10d8032c4b7e7a8c061dcb402c8915f6fc482c6732520d"} Feb 23 09:09:36 crc kubenswrapper[4834]: I0223 09:09:36.174180 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/3.log" Feb 23 09:09:36 crc kubenswrapper[4834]: I0223 09:09:36.176679 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e88acae3be30837dd0bb120eebb551d0d75cff7fb45c39c26d6544ee7f576778"} Feb 23 09:09:36 crc kubenswrapper[4834]: I0223 09:09:36.177155 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:09:36 crc kubenswrapper[4834]: I0223 09:09:36.178900 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"f9075b6bc87d992a8a14d100fecdc5c2ce212ab035b188ab2ce3ee64b6f25936"} Feb 23 09:09:36 crc kubenswrapper[4834]: I0223 09:09:36.179234 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:36 crc kubenswrapper[4834]: I0223 09:09:36.179277 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:36 crc kubenswrapper[4834]: I0223 09:09:36.206465 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-gtpxs" podStartSLOduration=45.206440793 podStartE2EDuration="45.206440793s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:36.202312228 +0000 UTC m=+112.280626635" watchObservedRunningTime="2026-02-23 09:09:36.206440793 +0000 UTC m=+112.284755200" Feb 23 09:09:36 crc kubenswrapper[4834]: I0223 09:09:36.223645 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:36 crc kubenswrapper[4834]: I0223 09:09:36.249074 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=28.249053373 
podStartE2EDuration="28.249053373s" podCreationTimestamp="2026-02-23 09:09:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:36.248160059 +0000 UTC m=+112.326474446" watchObservedRunningTime="2026-02-23 09:09:36.249053373 +0000 UTC m=+112.327367750" Feb 23 09:09:36 crc kubenswrapper[4834]: I0223 09:09:36.584563 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:36 crc kubenswrapper[4834]: E0223 09:09:36.584756 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nzcfx" podUID="4ceb8401-3a07-422f-ae4d-14366611f4a6" Feb 23 09:09:37 crc kubenswrapper[4834]: I0223 09:09:37.001193 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-nzcfx"] Feb 23 09:09:37 crc kubenswrapper[4834]: I0223 09:09:37.183169 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:37 crc kubenswrapper[4834]: E0223 09:09:37.184437 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nzcfx" podUID="4ceb8401-3a07-422f-ae4d-14366611f4a6" Feb 23 09:09:37 crc kubenswrapper[4834]: I0223 09:09:37.584297 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:37 crc kubenswrapper[4834]: I0223 09:09:37.584337 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:37 crc kubenswrapper[4834]: I0223 09:09:37.584357 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:37 crc kubenswrapper[4834]: E0223 09:09:37.584453 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Feb 23 09:09:37 crc kubenswrapper[4834]: E0223 09:09:37.584657 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Feb 23 09:09:37 crc kubenswrapper[4834]: E0223 09:09:37.584856 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Feb 23 09:09:38 crc kubenswrapper[4834]: I0223 09:09:38.339269 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs\") pod \"network-metrics-daemon-nzcfx\" (UID: \"4ceb8401-3a07-422f-ae4d-14366611f4a6\") " pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:38 crc kubenswrapper[4834]: E0223 09:09:38.339443 4834 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Feb 23 09:09:38 crc kubenswrapper[4834]: E0223 09:09:38.339507 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs podName:4ceb8401-3a07-422f-ae4d-14366611f4a6 nodeName:}" failed. No retries permitted until 2026-02-23 09:09:46.339484539 +0000 UTC m=+122.417798926 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs") pod "network-metrics-daemon-nzcfx" (UID: "4ceb8401-3a07-422f-ae4d-14366611f4a6") : object "openshift-multus"/"metrics-daemon-secret" not registered Feb 23 09:09:38 crc kubenswrapper[4834]: I0223 09:09:38.584499 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:38 crc kubenswrapper[4834]: E0223 09:09:38.584664 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-nzcfx" podUID="4ceb8401-3a07-422f-ae4d-14366611f4a6" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.029483 4834 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.029661 4834 kubelet_node_status.go:538] "Fast updating node status as it just became ready" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.076167 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bjngd"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.077232 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.077387 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.077985 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-8mk2m"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.078463 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.078548 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.079206 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.079248 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4p48q"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.079736 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.079266 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.097132 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.100034 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.106032 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.106319 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.107184 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.107582 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.108116 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.108354 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.108524 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.108636 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.108676 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Feb 23 09:09:39 crc 
kubenswrapper[4834]: I0223 09:09:39.108835 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.109289 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.110742 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.111298 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.111472 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.111616 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.112566 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.112893 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.113162 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.114513 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.114611 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.114725 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.114820 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.114847 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.114910 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.115006 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.115075 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.115105 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.115210 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.115260 4834 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.115345 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.115465 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.115725 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.115797 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.115878 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.114549 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-qt6tc"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.116128 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.116381 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.119290 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.124839 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-897nm"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.126897 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.127079 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.127784 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.128379 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-g9jpx"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.129174 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.148625 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149464 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7df8904d-d89c-41c3-b207-795a41e7cd3f-audit-policies\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149510 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578-config\") pod \"openshift-apiserver-operator-796bbdcf4f-v7mzh\" (UID: \"3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149540 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0f47419a-2e53-440b-854b-9fd226fb17d2-images\") pod \"machine-api-operator-5694c8668f-bjngd\" (UID: \"0f47419a-2e53-440b-854b-9fd226fb17d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149568 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149595 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/782436e6-7d7f-4e44-afe6-542014b15e86-encryption-config\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149623 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/22033762-41c1-4e84-88d5-59187abf701f-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149645 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-etcd-serving-ca\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149665 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-audit-policies\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149689 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149711 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149739 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/0f47419a-2e53-440b-854b-9fd226fb17d2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bjngd\" (UID: \"0f47419a-2e53-440b-854b-9fd226fb17d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149764 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-audit-dir\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149787 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6qdvx\" (UniqueName: \"kubernetes.io/projected/7df8904d-d89c-41c3-b207-795a41e7cd3f-kube-api-access-6qdvx\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149809 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c628e572-63b9-478b-bf3a-6ff1966480a1-config\") pod \"route-controller-manager-6576b87f9c-xslpt\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149834 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149857 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" 
(UniqueName: \"kubernetes.io/secret/782436e6-7d7f-4e44-afe6-542014b15e86-serving-cert\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149880 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7df8904d-d89c-41c3-b207-795a41e7cd3f-audit-dir\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149903 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c628e572-63b9-478b-bf3a-6ff1966480a1-serving-cert\") pod \"route-controller-manager-6576b87f9c-xslpt\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149928 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.149954 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150011 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/782436e6-7d7f-4e44-afe6-542014b15e86-node-pullsecrets\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150037 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/24eb6775-1135-4cc7-9e62-103e142f285a-trusted-ca-bundle\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150086 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-audit\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150111 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkc6n\" (UniqueName: \"kubernetes.io/projected/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-kube-api-access-dkc6n\") pod 
\"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150137 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22033762-41c1-4e84-88d5-59187abf701f-config\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150162 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-config\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150187 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/24eb6775-1135-4cc7-9e62-103e142f285a-console-serving-cert\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150220 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-config\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150245 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-client-ca\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150270 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/22033762-41c1-4e84-88d5-59187abf701f-service-ca-bundle\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150293 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c628e572-63b9-478b-bf3a-6ff1966480a1-client-ca\") pod \"route-controller-manager-6576b87f9c-xslpt\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150315 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p6pts\" (UniqueName: \"kubernetes.io/projected/c628e572-63b9-478b-bf3a-6ff1966480a1-kube-api-access-p6pts\") pod \"route-controller-manager-6576b87f9c-xslpt\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150353 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150379 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/24eb6775-1135-4cc7-9e62-103e142f285a-oauth-serving-cert\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150408 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/7df8904d-d89c-41c3-b207-795a41e7cd3f-encryption-config\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150450 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klsth\" (UniqueName: \"kubernetes.io/projected/782436e6-7d7f-4e44-afe6-542014b15e86-kube-api-access-klsth\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150503 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/24eb6775-1135-4cc7-9e62-103e142f285a-console-oauth-config\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150528 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-trusted-ca-bundle\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150550 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-v7mzh\" (UID: \"3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150573 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 
09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150595 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150622 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7df8904d-d89c-41c3-b207-795a41e7cd3f-serving-cert\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150648 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/782436e6-7d7f-4e44-afe6-542014b15e86-audit-dir\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150670 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67hvc\" (UniqueName: \"kubernetes.io/projected/3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578-kube-api-access-67hvc\") pod \"openshift-apiserver-operator-796bbdcf4f-v7mzh\" (UID: \"3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150695 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/22033762-41c1-4e84-88d5-59187abf701f-serving-cert\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150717 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-serving-cert\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150741 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150766 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsmhs\" (UniqueName: \"kubernetes.io/projected/0f47419a-2e53-440b-854b-9fd226fb17d2-kube-api-access-nsmhs\") pod \"machine-api-operator-5694c8668f-bjngd\" (UID: \"0f47419a-2e53-440b-854b-9fd226fb17d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" Feb 23 09:09:39 crc 
kubenswrapper[4834]: I0223 09:09:39.150789 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f47419a-2e53-440b-854b-9fd226fb17d2-config\") pod \"machine-api-operator-5694c8668f-bjngd\" (UID: \"0f47419a-2e53-440b-854b-9fd226fb17d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150811 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/7df8904d-d89c-41c3-b207-795a41e7cd3f-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150831 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/24eb6775-1135-4cc7-9e62-103e142f285a-service-ca\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150855 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7df8904d-d89c-41c3-b207-795a41e7cd3f-etcd-client\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150879 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd288\" (UniqueName: \"kubernetes.io/projected/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-kube-api-access-sd288\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150903 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjs2f\" (UniqueName: \"kubernetes.io/projected/24eb6775-1135-4cc7-9e62-103e142f285a-kube-api-access-fjs2f\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150926 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6rrzk\" (UniqueName: \"kubernetes.io/projected/22033762-41c1-4e84-88d5-59187abf701f-kube-api-access-6rrzk\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150951 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.150979 4834 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/782436e6-7d7f-4e44-afe6-542014b15e86-etcd-client\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.151001 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-image-import-ca\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.151000 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.151024 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7df8904d-d89c-41c3-b207-795a41e7cd3f-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.151049 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/24eb6775-1135-4cc7-9e62-103e142f285a-console-config\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.151074 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.151334 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.151477 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.151551 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.151616 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.151862 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.152062 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.152109 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.152199 4834 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.152226 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.152229 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.152306 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.152313 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.152562 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.152583 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.153187 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.153565 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.155762 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.156114 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.156242 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.156267 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.156441 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.156568 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.156588 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.156687 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.156760 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.157240 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.159091 4834 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-apiserver-operator"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.159787 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.161846 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.162230 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.162677 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.164991 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.165693 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-n6kdb"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.166486 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-n6kdb" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.168224 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.169561 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.173407 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.173725 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.174088 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.174149 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.174315 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.174711 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.174900 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-mxsss"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.175324 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.175407 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-mxsss" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.184550 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.184658 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.185148 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.185366 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.185684 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.186040 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xzc7c"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.188117 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.188418 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.188497 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.195221 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.200649 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.200990 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.201150 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.201716 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.202001 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.202183 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.202410 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.202480 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.202605 4834 reflector.go:368] Caches populated for 
*v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.202869 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.202970 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.203326 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.203371 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.215880 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.216130 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.216046 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.216286 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.217955 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.217974 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.218698 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.218856 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-rf5l7"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.219436 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.220110 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.220261 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rcwhk"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.220862 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.222009 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.222602 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.223203 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.223272 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.223626 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.224278 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.224698 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.225708 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.231189 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-bdng5"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.233118 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-2wpmg"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.233547 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.233895 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.233953 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.234154 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2wpmg" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.234500 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.235227 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.242559 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.243232 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k9lbb"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.243652 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k9lbb" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.243695 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.249870 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-m8kxw"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262308 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7df8904d-d89c-41c3-b207-795a41e7cd3f-etcd-client\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262359 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/7df8904d-d89c-41c3-b207-795a41e7cd3f-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262388 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/24eb6775-1135-4cc7-9e62-103e142f285a-service-ca\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262432 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f489a96f-1839-4986-9340-e9b9d8960435-metrics-certs\") pod \"router-default-5444994796-rf5l7\" (UID: \"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262461 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd288\" (UniqueName: \"kubernetes.io/projected/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-kube-api-access-sd288\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262485 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f489a96f-1839-4986-9340-e9b9d8960435-service-ca-bundle\") pod \"router-default-5444994796-rf5l7\" (UID: 
\"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262509 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjs2f\" (UniqueName: \"kubernetes.io/projected/24eb6775-1135-4cc7-9e62-103e142f285a-kube-api-access-fjs2f\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262531 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ab9ada9-cbae-4592-b699-e3bf33c09a95-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-2zrzx\" (UID: \"4ab9ada9-cbae-4592-b699-e3bf33c09a95\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262554 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6rrzk\" (UniqueName: \"kubernetes.io/projected/22033762-41c1-4e84-88d5-59187abf701f-kube-api-access-6rrzk\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262574 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tsbd\" (UniqueName: \"kubernetes.io/projected/f489a96f-1839-4986-9340-e9b9d8960435-kube-api-access-4tsbd\") pod \"router-default-5444994796-rf5l7\" (UID: \"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262597 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262623 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7df8904d-d89c-41c3-b207-795a41e7cd3f-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262640 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/24eb6775-1135-4cc7-9e62-103e142f285a-console-config\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262667 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qq7hb\" (UniqueName: \"kubernetes.io/projected/fbca2b49-c933-408c-9c80-fb1202bfb6f1-kube-api-access-qq7hb\") pod \"downloads-7954f5f757-mxsss\" (UID: \"fbca2b49-c933-408c-9c80-fb1202bfb6f1\") " pod="openshift-console/downloads-7954f5f757-mxsss" Feb 23 09:09:39 crc 
kubenswrapper[4834]: I0223 09:09:39.262690 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/782436e6-7d7f-4e44-afe6-542014b15e86-etcd-client\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262711 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-image-import-ca\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262733 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262751 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kk2sl\" (UniqueName: \"kubernetes.io/projected/6e72626f-db77-4945-8fb1-48c1d7507251-kube-api-access-kk2sl\") pod \"dns-operator-744455d44c-n6kdb\" (UID: \"6e72626f-db77-4945-8fb1-48c1d7507251\") " pod="openshift-dns-operator/dns-operator-744455d44c-n6kdb" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262773 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7df8904d-d89c-41c3-b207-795a41e7cd3f-audit-policies\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262805 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0f47419a-2e53-440b-854b-9fd226fb17d2-images\") pod \"machine-api-operator-5694c8668f-bjngd\" (UID: \"0f47419a-2e53-440b-854b-9fd226fb17d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262826 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4ab9ada9-cbae-4592-b699-e3bf33c09a95-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-2zrzx\" (UID: \"4ab9ada9-cbae-4592-b699-e3bf33c09a95\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262851 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578-config\") pod \"openshift-apiserver-operator-796bbdcf4f-v7mzh\" (UID: \"3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262877 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262902 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/782436e6-7d7f-4e44-afe6-542014b15e86-encryption-config\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262924 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6e72626f-db77-4945-8fb1-48c1d7507251-metrics-tls\") pod \"dns-operator-744455d44c-n6kdb\" (UID: \"6e72626f-db77-4945-8fb1-48c1d7507251\") " pod="openshift-dns-operator/dns-operator-744455d44c-n6kdb" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262950 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-audit-policies\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262971 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/22033762-41c1-4e84-88d5-59187abf701f-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.262993 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-etcd-serving-ca\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263030 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263054 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263074 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/f489a96f-1839-4986-9340-e9b9d8960435-stats-auth\") pod \"router-default-5444994796-rf5l7\" (UID: 
\"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263098 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsfrj\" (UniqueName: \"kubernetes.io/projected/4ae41025-47a2-4192-8650-e3bec3a0a8f7-kube-api-access-dsfrj\") pod \"ingress-operator-5b745b69d9-kz6hj\" (UID: \"4ae41025-47a2-4192-8650-e3bec3a0a8f7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263121 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/0f47419a-2e53-440b-854b-9fd226fb17d2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bjngd\" (UID: \"0f47419a-2e53-440b-854b-9fd226fb17d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263141 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4ae41025-47a2-4192-8650-e3bec3a0a8f7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-kz6hj\" (UID: \"4ae41025-47a2-4192-8650-e3bec3a0a8f7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263166 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c628e572-63b9-478b-bf3a-6ff1966480a1-config\") pod \"route-controller-manager-6576b87f9c-xslpt\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263184 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-audit-dir\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263204 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6qdvx\" (UniqueName: \"kubernetes.io/projected/7df8904d-d89c-41c3-b207-795a41e7cd3f-kube-api-access-6qdvx\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263225 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263245 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4ae41025-47a2-4192-8650-e3bec3a0a8f7-trusted-ca\") pod \"ingress-operator-5b745b69d9-kz6hj\" (UID: \"4ae41025-47a2-4192-8650-e3bec3a0a8f7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" Feb 23 09:09:39 crc 
kubenswrapper[4834]: I0223 09:09:39.263263 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c628e572-63b9-478b-bf3a-6ff1966480a1-serving-cert\") pod \"route-controller-manager-6576b87f9c-xslpt\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263285 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/782436e6-7d7f-4e44-afe6-542014b15e86-serving-cert\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263305 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7df8904d-d89c-41c3-b207-795a41e7cd3f-audit-dir\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263326 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/782436e6-7d7f-4e44-afe6-542014b15e86-node-pullsecrets\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263347 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263365 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263404 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36d9e9ae-0e67-441b-bbdb-a5292cba2360-config\") pod \"machine-approver-56656f9798-f7vd2\" (UID: \"36d9e9ae-0e67-441b-bbdb-a5292cba2360\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263445 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-audit\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263465 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkc6n\" (UniqueName: 
\"kubernetes.io/projected/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-kube-api-access-dkc6n\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263485 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/24eb6775-1135-4cc7-9e62-103e142f285a-trusted-ca-bundle\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263505 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gqxvf\" (UniqueName: \"kubernetes.io/projected/4ab9ada9-cbae-4592-b699-e3bf33c09a95-kube-api-access-gqxvf\") pod \"openshift-controller-manager-operator-756b6f6bc6-2zrzx\" (UID: \"4ab9ada9-cbae-4592-b699-e3bf33c09a95\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263560 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/36d9e9ae-0e67-441b-bbdb-a5292cba2360-auth-proxy-config\") pod \"machine-approver-56656f9798-f7vd2\" (UID: \"36d9e9ae-0e67-441b-bbdb-a5292cba2360\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263583 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/24eb6775-1135-4cc7-9e62-103e142f285a-console-serving-cert\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263600 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4ae41025-47a2-4192-8650-e3bec3a0a8f7-metrics-tls\") pod \"ingress-operator-5b745b69d9-kz6hj\" (UID: \"4ae41025-47a2-4192-8650-e3bec3a0a8f7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.263622 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22033762-41c1-4e84-88d5-59187abf701f-config\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266306 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-config\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266387 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-client-ca\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: 
\"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266415 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/36d9e9ae-0e67-441b-bbdb-a5292cba2360-machine-approver-tls\") pod \"machine-approver-56656f9798-f7vd2\" (UID: \"36d9e9ae-0e67-441b-bbdb-a5292cba2360\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266476 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-config\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266499 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p6pts\" (UniqueName: \"kubernetes.io/projected/c628e572-63b9-478b-bf3a-6ff1966480a1-kube-api-access-p6pts\") pod \"route-controller-manager-6576b87f9c-xslpt\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266521 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/22033762-41c1-4e84-88d5-59187abf701f-service-ca-bundle\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266543 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c628e572-63b9-478b-bf3a-6ff1966480a1-client-ca\") pod \"route-controller-manager-6576b87f9c-xslpt\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266571 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266595 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/24eb6775-1135-4cc7-9e62-103e142f285a-oauth-serving-cert\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266632 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/7df8904d-d89c-41c3-b207-795a41e7cd3f-encryption-config\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: 
I0223 09:09:39.266657 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qskkb\" (UniqueName: \"kubernetes.io/projected/36d9e9ae-0e67-441b-bbdb-a5292cba2360-kube-api-access-qskkb\") pod \"machine-approver-56656f9798-f7vd2\" (UID: \"36d9e9ae-0e67-441b-bbdb-a5292cba2360\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266678 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klsth\" (UniqueName: \"kubernetes.io/projected/782436e6-7d7f-4e44-afe6-542014b15e86-kube-api-access-klsth\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266695 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/24eb6775-1135-4cc7-9e62-103e142f285a-console-oauth-config\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266725 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266748 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266774 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-trusted-ca-bundle\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266794 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-v7mzh\" (UID: \"3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266819 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/782436e6-7d7f-4e44-afe6-542014b15e86-audit-dir\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266855 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67hvc\" (UniqueName: 
\"kubernetes.io/projected/3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578-kube-api-access-67hvc\") pod \"openshift-apiserver-operator-796bbdcf4f-v7mzh\" (UID: \"3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266878 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7df8904d-d89c-41c3-b207-795a41e7cd3f-serving-cert\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266904 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/f489a96f-1839-4986-9340-e9b9d8960435-default-certificate\") pod \"router-default-5444994796-rf5l7\" (UID: \"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266927 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-serving-cert\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266952 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/22033762-41c1-4e84-88d5-59187abf701f-serving-cert\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.266976 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.267000 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsmhs\" (UniqueName: \"kubernetes.io/projected/0f47419a-2e53-440b-854b-9fd226fb17d2-kube-api-access-nsmhs\") pod \"machine-api-operator-5694c8668f-bjngd\" (UID: \"0f47419a-2e53-440b-854b-9fd226fb17d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.267022 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f47419a-2e53-440b-854b-9fd226fb17d2-config\") pod \"machine-api-operator-5694c8668f-bjngd\" (UID: \"0f47419a-2e53-440b-854b-9fd226fb17d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.267526 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.268119 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/0f47419a-2e53-440b-854b-9fd226fb17d2-config\") pod \"machine-api-operator-5694c8668f-bjngd\" (UID: \"0f47419a-2e53-440b-854b-9fd226fb17d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.268747 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bjngd"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.268803 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hlc9q"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.269797 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.270177 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-m8kxw" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.272384 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/22033762-41c1-4e84-88d5-59187abf701f-service-ca-bundle\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.273931 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/0f47419a-2e53-440b-854b-9fd226fb17d2-images\") pod \"machine-api-operator-5694c8668f-bjngd\" (UID: \"0f47419a-2e53-440b-854b-9fd226fb17d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.274496 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/7df8904d-d89c-41c3-b207-795a41e7cd3f-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.275789 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/7df8904d-d89c-41c3-b207-795a41e7cd3f-etcd-client\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.276547 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c628e572-63b9-478b-bf3a-6ff1966480a1-client-ca\") pod \"route-controller-manager-6576b87f9c-xslpt\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.277475 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/24eb6775-1135-4cc7-9e62-103e142f285a-service-ca\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.277828 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.278373 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7df8904d-d89c-41c3-b207-795a41e7cd3f-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.278534 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.278730 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578-config\") pod \"openshift-apiserver-operator-796bbdcf4f-v7mzh\" (UID: \"3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.279287 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c628e572-63b9-478b-bf3a-6ff1966480a1-serving-cert\") pod \"route-controller-manager-6576b87f9c-xslpt\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.279869 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/24eb6775-1135-4cc7-9e62-103e142f285a-console-config\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.280355 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.280363 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-audit\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.280743 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.280959 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Feb 23 09:09:39 crc 
kubenswrapper[4834]: I0223 09:09:39.281301 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/24eb6775-1135-4cc7-9e62-103e142f285a-trusted-ca-bundle\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.281782 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-image-import-ca\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.281931 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-audit-policies\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.282634 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/22033762-41c1-4e84-88d5-59187abf701f-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.283574 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-audit-dir\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.283804 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/782436e6-7d7f-4e44-afe6-542014b15e86-encryption-config\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.283870 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7df8904d-d89c-41c3-b207-795a41e7cd3f-audit-dir\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.283952 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/782436e6-7d7f-4e44-afe6-542014b15e86-node-pullsecrets\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.284177 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22033762-41c1-4e84-88d5-59187abf701f-config\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.284631 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.284767 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-etcd-serving-ca\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.285837 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.286591 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.286864 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-client-ca\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.287284 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.287486 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.287879 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.289398 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c628e572-63b9-478b-bf3a-6ff1966480a1-config\") pod \"route-controller-manager-6576b87f9c-xslpt\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.289452 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.292176 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-config\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.292260 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/24eb6775-1135-4cc7-9e62-103e142f285a-console-oauth-config\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.292307 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.292702 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.292798 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/782436e6-7d7f-4e44-afe6-542014b15e86-audit-dir\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.293203 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/782436e6-7d7f-4e44-afe6-542014b15e86-etcd-client\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.293247 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/0f47419a-2e53-440b-854b-9fd226fb17d2-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-bjngd\" (UID: \"0f47419a-2e53-440b-854b-9fd226fb17d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.293395 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/7df8904d-d89c-41c3-b207-795a41e7cd3f-encryption-config\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.293453 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.294061 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.294751 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7df8904d-d89c-41c3-b207-795a41e7cd3f-audit-policies\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.295096 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.295165 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-config\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.296036 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-v7mzh\" (UID: \"3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.296462 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/782436e6-7d7f-4e44-afe6-542014b15e86-trusted-ca-bundle\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.296517 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: 
\"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.298008 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-serving-cert\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.298120 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/24eb6775-1135-4cc7-9e62-103e142f285a-oauth-serving-cert\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.298897 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/22033762-41c1-4e84-88d5-59187abf701f-serving-cert\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.301287 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.304348 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/782436e6-7d7f-4e44-afe6-542014b15e86-serving-cert\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.310834 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.319560 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/24eb6775-1135-4cc7-9e62-103e142f285a-console-serving-cert\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.321372 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.321404 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.321414 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.321436 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.321522 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.322196 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7df8904d-d89c-41c3-b207-795a41e7cd3f-serving-cert\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.323769 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-mxsss"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.324639 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-2wpmg"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.325227 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.325806 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.326260 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.326268 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.338223 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-hwvz9"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.338294 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.338669 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.338717 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-hwvz9" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.340663 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.348017 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.348565 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.348729 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-qrhqs"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.349130 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.350087 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.350372 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.350618 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-rlmdc"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.350853 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.351810 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.351834 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4p48q"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.351845 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.351927 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.354762 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.356341 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xzc7c"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.356656 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-bdng5"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.358569 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.358594 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-8mk2m"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.360334 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rcwhk"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.360616 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.362404 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-qt6tc"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.363834 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.365560 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.365859 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.366686 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-n6kdb"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.367773 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.368331 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6e72626f-db77-4945-8fb1-48c1d7507251-metrics-tls\") pod \"dns-operator-744455d44c-n6kdb\" (UID: \"6e72626f-db77-4945-8fb1-48c1d7507251\") " pod="openshift-dns-operator/dns-operator-744455d44c-n6kdb" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.368438 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/f489a96f-1839-4986-9340-e9b9d8960435-stats-auth\") pod \"router-default-5444994796-rf5l7\" (UID: \"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.368463 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4ae41025-47a2-4192-8650-e3bec3a0a8f7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-kz6hj\" (UID: \"4ae41025-47a2-4192-8650-e3bec3a0a8f7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.368504 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dsfrj\" (UniqueName: \"kubernetes.io/projected/4ae41025-47a2-4192-8650-e3bec3a0a8f7-kube-api-access-dsfrj\") pod \"ingress-operator-5b745b69d9-kz6hj\" (UID: \"4ae41025-47a2-4192-8650-e3bec3a0a8f7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.368538 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4ae41025-47a2-4192-8650-e3bec3a0a8f7-trusted-ca\") pod \"ingress-operator-5b745b69d9-kz6hj\" (UID: \"4ae41025-47a2-4192-8650-e3bec3a0a8f7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.368604 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36d9e9ae-0e67-441b-bbdb-a5292cba2360-config\") pod \"machine-approver-56656f9798-f7vd2\" (UID: \"36d9e9ae-0e67-441b-bbdb-a5292cba2360\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.368636 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gqxvf\" (UniqueName: \"kubernetes.io/projected/4ab9ada9-cbae-4592-b699-e3bf33c09a95-kube-api-access-gqxvf\") pod \"openshift-controller-manager-operator-756b6f6bc6-2zrzx\" (UID: \"4ab9ada9-cbae-4592-b699-e3bf33c09a95\") " 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.368691 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/36d9e9ae-0e67-441b-bbdb-a5292cba2360-auth-proxy-config\") pod \"machine-approver-56656f9798-f7vd2\" (UID: \"36d9e9ae-0e67-441b-bbdb-a5292cba2360\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.368711 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4ae41025-47a2-4192-8650-e3bec3a0a8f7-metrics-tls\") pod \"ingress-operator-5b745b69d9-kz6hj\" (UID: \"4ae41025-47a2-4192-8650-e3bec3a0a8f7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.368731 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/36d9e9ae-0e67-441b-bbdb-a5292cba2360-machine-approver-tls\") pod \"machine-approver-56656f9798-f7vd2\" (UID: \"36d9e9ae-0e67-441b-bbdb-a5292cba2360\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.368793 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qskkb\" (UniqueName: \"kubernetes.io/projected/36d9e9ae-0e67-441b-bbdb-a5292cba2360-kube-api-access-qskkb\") pod \"machine-approver-56656f9798-f7vd2\" (UID: \"36d9e9ae-0e67-441b-bbdb-a5292cba2360\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.368851 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/f489a96f-1839-4986-9340-e9b9d8960435-default-certificate\") pod \"router-default-5444994796-rf5l7\" (UID: \"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.368889 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f489a96f-1839-4986-9340-e9b9d8960435-metrics-certs\") pod \"router-default-5444994796-rf5l7\" (UID: \"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.368934 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f489a96f-1839-4986-9340-e9b9d8960435-service-ca-bundle\") pod \"router-default-5444994796-rf5l7\" (UID: \"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.369006 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-cd8np"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.369899 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/36d9e9ae-0e67-441b-bbdb-a5292cba2360-config\") pod \"machine-approver-56656f9798-f7vd2\" (UID: \"36d9e9ae-0e67-441b-bbdb-a5292cba2360\") " 
pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.369955 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/36d9e9ae-0e67-441b-bbdb-a5292cba2360-auth-proxy-config\") pod \"machine-approver-56656f9798-f7vd2\" (UID: \"36d9e9ae-0e67-441b-bbdb-a5292cba2360\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.370323 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/4ae41025-47a2-4192-8650-e3bec3a0a8f7-trusted-ca\") pod \"ingress-operator-5b745b69d9-kz6hj\" (UID: \"4ae41025-47a2-4192-8650-e3bec3a0a8f7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.370948 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-cd8np" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.371525 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ab9ada9-cbae-4592-b699-e3bf33c09a95-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-2zrzx\" (UID: \"4ab9ada9-cbae-4592-b699-e3bf33c09a95\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.371565 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tsbd\" (UniqueName: \"kubernetes.io/projected/f489a96f-1839-4986-9340-e9b9d8960435-kube-api-access-4tsbd\") pod \"router-default-5444994796-rf5l7\" (UID: \"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.371762 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qq7hb\" (UniqueName: \"kubernetes.io/projected/fbca2b49-c933-408c-9c80-fb1202bfb6f1-kube-api-access-qq7hb\") pod \"downloads-7954f5f757-mxsss\" (UID: \"fbca2b49-c933-408c-9c80-fb1202bfb6f1\") " pod="openshift-console/downloads-7954f5f757-mxsss" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.372223 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/36d9e9ae-0e67-441b-bbdb-a5292cba2360-machine-approver-tls\") pod \"machine-approver-56656f9798-f7vd2\" (UID: \"36d9e9ae-0e67-441b-bbdb-a5292cba2360\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.372629 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k9lbb"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.372585 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kk2sl\" (UniqueName: \"kubernetes.io/projected/6e72626f-db77-4945-8fb1-48c1d7507251-kube-api-access-kk2sl\") pod \"dns-operator-744455d44c-n6kdb\" (UID: \"6e72626f-db77-4945-8fb1-48c1d7507251\") " pod="openshift-dns-operator/dns-operator-744455d44c-n6kdb" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.372659 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/6e72626f-db77-4945-8fb1-48c1d7507251-metrics-tls\") pod \"dns-operator-744455d44c-n6kdb\" (UID: \"6e72626f-db77-4945-8fb1-48c1d7507251\") " pod="openshift-dns-operator/dns-operator-744455d44c-n6kdb" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.372678 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4ab9ada9-cbae-4592-b699-e3bf33c09a95-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-2zrzx\" (UID: \"4ab9ada9-cbae-4592-b699-e3bf33c09a95\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.373719 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/4ae41025-47a2-4192-8650-e3bec3a0a8f7-metrics-tls\") pod \"ingress-operator-5b745b69d9-kz6hj\" (UID: \"4ae41025-47a2-4192-8650-e3bec3a0a8f7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.373951 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.374840 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.376012 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-hwvz9"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.377311 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.378944 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-897nm"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.379558 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.380789 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-g9jpx"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.381939 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.382910 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hlc9q"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.383894 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.384990 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.386303 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-m8kxw"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.387289 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.388241 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.393118 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/f489a96f-1839-4986-9340-e9b9d8960435-default-certificate\") pod \"router-default-5444994796-rf5l7\" (UID: \"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.393276 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-qrhqs"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.395726 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-c9zqb"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.397657 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-c9zqb" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.398227 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-ljtgg"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.399945 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-ljtgg" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.400025 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.403895 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-c9zqb"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.404991 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/f489a96f-1839-4986-9340-e9b9d8960435-stats-auth\") pod \"router-default-5444994796-rf5l7\" (UID: \"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.406354 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-ljtgg"] Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.428913 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.433282 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f489a96f-1839-4986-9340-e9b9d8960435-metrics-certs\") pod \"router-default-5444994796-rf5l7\" (UID: \"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.439649 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.460110 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.480338 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Feb 23 09:09:39 crc 
kubenswrapper[4834]: I0223 09:09:39.499027 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.519866 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.539985 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.560032 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.580683 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.584276 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.584296 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.584287 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.600301 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.620551 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.640538 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.660567 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.680179 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.699827 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.701227 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f489a96f-1839-4986-9340-e9b9d8960435-service-ca-bundle\") pod \"router-default-5444994796-rf5l7\" (UID: \"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.719969 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.740096 4834 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.760168 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.778811 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.800845 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.807802 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4ab9ada9-cbae-4592-b699-e3bf33c09a95-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-2zrzx\" (UID: \"4ab9ada9-cbae-4592-b699-e3bf33c09a95\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.820084 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.839820 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.842790 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4ab9ada9-cbae-4592-b699-e3bf33c09a95-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-2zrzx\" (UID: \"4ab9ada9-cbae-4592-b699-e3bf33c09a95\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.860727 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.880853 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.899766 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.919930 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.940116 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.970436 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Feb 23 09:09:39 crc kubenswrapper[4834]: I0223 09:09:39.979837 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.001185 4834 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.021432 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.040122 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.059787 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.081588 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.099930 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.121233 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.139708 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.160345 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.180444 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.200037 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.261863 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.277879 4834 request.go:700] Waited for 1.007497988s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-multus/secrets?fieldSelector=metadata.name%3Dmultus-admission-controller-secret&limit=500&resourceVersion=0 Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.281261 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.300751 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.321356 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.346107 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.360170 4834 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-marketplace"/"kube-root-ca.crt" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.380491 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.415930 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd288\" (UniqueName: \"kubernetes.io/projected/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-kube-api-access-sd288\") pod \"controller-manager-879f6c89f-8mk2m\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.464088 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjs2f\" (UniqueName: \"kubernetes.io/projected/24eb6775-1135-4cc7-9e62-103e142f285a-kube-api-access-fjs2f\") pod \"console-f9d7485db-g9jpx\" (UID: \"24eb6775-1135-4cc7-9e62-103e142f285a\") " pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.469714 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.480205 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.482981 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6rrzk\" (UniqueName: \"kubernetes.io/projected/22033762-41c1-4e84-88d5-59187abf701f-kube-api-access-6rrzk\") pod \"authentication-operator-69f744f599-qt6tc\" (UID: \"22033762-41c1-4e84-88d5-59187abf701f\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.492025 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkc6n\" (UniqueName: \"kubernetes.io/projected/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-kube-api-access-dkc6n\") pod \"oauth-openshift-558db77b4-4p48q\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.499953 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.520200 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.540052 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.560956 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.580657 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.585270 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.600097 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.636687 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67hvc\" (UniqueName: \"kubernetes.io/projected/3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578-kube-api-access-67hvc\") pod \"openshift-apiserver-operator-796bbdcf4f-v7mzh\" (UID: \"3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.655949 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsmhs\" (UniqueName: \"kubernetes.io/projected/0f47419a-2e53-440b-854b-9fd226fb17d2-kube-api-access-nsmhs\") pod \"machine-api-operator-5694c8668f-bjngd\" (UID: \"0f47419a-2e53-440b-854b-9fd226fb17d2\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.665459 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.674793 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klsth\" (UniqueName: \"kubernetes.io/projected/782436e6-7d7f-4e44-afe6-542014b15e86-kube-api-access-klsth\") pod \"apiserver-76f77b778f-897nm\" (UID: \"782436e6-7d7f-4e44-afe6-542014b15e86\") " pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.675360 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.695202 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.695680 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-g9jpx"] Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.705231 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p6pts\" (UniqueName: \"kubernetes.io/projected/c628e572-63b9-478b-bf3a-6ff1966480a1-kube-api-access-p6pts\") pod \"route-controller-manager-6576b87f9c-xslpt\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.719523 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.719744 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.720269 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6qdvx\" (UniqueName: \"kubernetes.io/projected/7df8904d-d89c-41c3-b207-795a41e7cd3f-kube-api-access-6qdvx\") pod \"apiserver-7bbb656c7d-2r5bx\" (UID: \"7df8904d-d89c-41c3-b207-795a41e7cd3f\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.735511 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.741526 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.761080 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.782318 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.800321 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.820118 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.840755 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.861605 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.872627 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-8mk2m"] Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.879081 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Feb 23 09:09:40 crc kubenswrapper[4834]: W0223 09:09:40.890778 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddcbde5e8_97ff_42f6_85a9_9fb6f2ceee39.slice/crio-201ac388bf828fc084f26790dd7d6491b37140b5c8abe2cac813754c7e04f1d8 WatchSource:0}: Error finding container 201ac388bf828fc084f26790dd7d6491b37140b5c8abe2cac813754c7e04f1d8: Status 404 returned error can't find the container with id 201ac388bf828fc084f26790dd7d6491b37140b5c8abe2cac813754c7e04f1d8 Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.899288 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.912269 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.919974 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.924223 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.924623 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4p48q"] Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.941590 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.952317 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.962354 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.973876 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh"] Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.982994 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Feb 23 09:09:40 crc kubenswrapper[4834]: I0223 09:09:40.983535 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-897nm"] Feb 23 09:09:40 crc kubenswrapper[4834]: W0223 09:09:40.989720 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b0c4f5d_0e85_4f0e_be00_ed9f70d3d578.slice/crio-45f012a3a93b791bd4d1af0e14653fbe41631bd6aece0c61c8e0fb3e7cfd9ccd WatchSource:0}: Error finding container 45f012a3a93b791bd4d1af0e14653fbe41631bd6aece0c61c8e0fb3e7cfd9ccd: Status 404 returned error can't find the container with id 45f012a3a93b791bd4d1af0e14653fbe41631bd6aece0c61c8e0fb3e7cfd9ccd Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.000874 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Feb 23 09:09:41 crc kubenswrapper[4834]: W0223 09:09:41.001466 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod782436e6_7d7f_4e44_afe6_542014b15e86.slice/crio-176b2abef2d66f48b46007e73232d6adf89651cf8f606a393e6ac3e3a1ab0b6c WatchSource:0}: Error finding container 176b2abef2d66f48b46007e73232d6adf89651cf8f606a393e6ac3e3a1ab0b6c: Status 404 returned error can't find the container with id 176b2abef2d66f48b46007e73232d6adf89651cf8f606a393e6ac3e3a1ab0b6c Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.020632 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.040463 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.069316 
4834 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.082639 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.091104 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-bjngd"] Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.111226 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Feb 23 09:09:41 crc kubenswrapper[4834]: W0223 09:09:41.118099 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f47419a_2e53_440b_854b_9fd226fb17d2.slice/crio-d28dc873176ee5833afb35f9f640c4e589d5b746dd69f6981f9266abd374d153 WatchSource:0}: Error finding container d28dc873176ee5833afb35f9f640c4e589d5b746dd69f6981f9266abd374d153: Status 404 returned error can't find the container with id d28dc873176ee5833afb35f9f640c4e589d5b746dd69f6981f9266abd374d153 Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.119367 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.135648 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-qt6tc"] Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.142134 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-sysctl-allowlist" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.162186 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx"] Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.181547 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gqxvf\" (UniqueName: \"kubernetes.io/projected/4ab9ada9-cbae-4592-b699-e3bf33c09a95-kube-api-access-gqxvf\") pod \"openshift-controller-manager-operator-756b6f6bc6-2zrzx\" (UID: \"4ab9ada9-cbae-4592-b699-e3bf33c09a95\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.189679 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt"] Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.197236 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsfrj\" (UniqueName: \"kubernetes.io/projected/4ae41025-47a2-4192-8650-e3bec3a0a8f7-kube-api-access-dsfrj\") pod \"ingress-operator-5b745b69d9-kz6hj\" (UID: \"4ae41025-47a2-4192-8650-e3bec3a0a8f7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.220599 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/4ae41025-47a2-4192-8650-e3bec3a0a8f7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-kz6hj\" (UID: \"4ae41025-47a2-4192-8650-e3bec3a0a8f7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.236788 4834 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh" event={"ID":"3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578","Type":"ContainerStarted","Data":"fafaf3fe563295cb83e6c9bb68234fd6d1a0f48a3290c170afd889b53f4bf8c2"} Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.236842 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh" event={"ID":"3b0c4f5d-0e85-4f0e-be00-ed9f70d3d578","Type":"ContainerStarted","Data":"45f012a3a93b791bd4d1af0e14653fbe41631bd6aece0c61c8e0fb3e7cfd9ccd"} Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.238079 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qskkb\" (UniqueName: \"kubernetes.io/projected/36d9e9ae-0e67-441b-bbdb-a5292cba2360-kube-api-access-qskkb\") pod \"machine-approver-56656f9798-f7vd2\" (UID: \"36d9e9ae-0e67-441b-bbdb-a5292cba2360\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.239717 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-897nm" event={"ID":"782436e6-7d7f-4e44-afe6-542014b15e86","Type":"ContainerStarted","Data":"176b2abef2d66f48b46007e73232d6adf89651cf8f606a393e6ac3e3a1ab0b6c"} Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.240134 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.241128 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" event={"ID":"22033762-41c1-4e84-88d5-59187abf701f","Type":"ContainerStarted","Data":"30012e4c3cecec7802bc2c2fd7009334ecb6ea50f5bd20b4c65492c6c72131f7"} Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.241864 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" event={"ID":"c628e572-63b9-478b-bf3a-6ff1966480a1","Type":"ContainerStarted","Data":"b01ece9bcb110803fd09b8ad8d157d66ef39c1a73c1b1fb63417a6b2aeefc63d"} Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.242751 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" event={"ID":"7df8904d-d89c-41c3-b207-795a41e7cd3f","Type":"ContainerStarted","Data":"3735b05c144f2db39e6d347b95656082add7b8f2a98f24b693bb97225a0a1232"} Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.245056 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" event={"ID":"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39","Type":"ContainerStarted","Data":"1afc1551d544cf2a7d08d6356b6ea290d5abce35e1f693735cfc3c93ecafc0db"} Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.245090 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" event={"ID":"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39","Type":"ContainerStarted","Data":"201ac388bf828fc084f26790dd7d6491b37140b5c8abe2cac813754c7e04f1d8"} Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.245536 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 
09:09:41.250597 4834 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-8mk2m container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.250626 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" event={"ID":"0f47419a-2e53-440b-854b-9fd226fb17d2","Type":"ContainerStarted","Data":"d28dc873176ee5833afb35f9f640c4e589d5b746dd69f6981f9266abd374d153"} Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.250663 4834 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" podUID="dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.253054 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" event={"ID":"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd","Type":"ContainerStarted","Data":"27e8b8ca5e6a3505481dec20a7f6620271808938b332b4bae13a83932a179d72"} Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.255515 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-g9jpx" event={"ID":"24eb6775-1135-4cc7-9e62-103e142f285a","Type":"ContainerStarted","Data":"62d756f5a27af7cc026514e1eb26bf54d718fac6c8e5c20859faab44fa5bb0a3"} Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.255540 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-g9jpx" event={"ID":"24eb6775-1135-4cc7-9e62-103e142f285a","Type":"ContainerStarted","Data":"69894fcb8756a0ef631556076b61bebd5d75ad673f2c43458d384de6d4227a1d"} Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.259946 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.277459 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.278167 4834 request.go:700] Waited for 1.906668954s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dmachine-config-server-dockercfg-qx5rd&limit=500&resourceVersion=0 Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.280035 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.326207 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tsbd\" (UniqueName: \"kubernetes.io/projected/f489a96f-1839-4986-9340-e9b9d8960435-kube-api-access-4tsbd\") pod \"router-default-5444994796-rf5l7\" (UID: \"f489a96f-1839-4986-9340-e9b9d8960435\") " pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.348645 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qq7hb\" (UniqueName: \"kubernetes.io/projected/fbca2b49-c933-408c-9c80-fb1202bfb6f1-kube-api-access-qq7hb\") pod \"downloads-7954f5f757-mxsss\" (UID: \"fbca2b49-c933-408c-9c80-fb1202bfb6f1\") " pod="openshift-console/downloads-7954f5f757-mxsss" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.358251 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kk2sl\" (UniqueName: \"kubernetes.io/projected/6e72626f-db77-4945-8fb1-48c1d7507251-kube-api-access-kk2sl\") pod \"dns-operator-744455d44c-n6kdb\" (UID: \"6e72626f-db77-4945-8fb1-48c1d7507251\") " pod="openshift-dns-operator/dns-operator-744455d44c-n6kdb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.360128 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.381501 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.401053 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.420041 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.441589 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.442194 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.455615 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-n6kdb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.459518 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Feb 23 09:09:41 crc kubenswrapper[4834]: W0223 09:09:41.471888 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod36d9e9ae_0e67_441b_bbdb_a5292cba2360.slice/crio-470e35eaca1681ef16aa1b9bc99144b82e7780d011302d50a903b0f155ec8cd7 WatchSource:0}: Error finding container 470e35eaca1681ef16aa1b9bc99144b82e7780d011302d50a903b0f155ec8cd7: Status 404 returned error can't find the container with id 470e35eaca1681ef16aa1b9bc99144b82e7780d011302d50a903b0f155ec8cd7 Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.479636 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.481980 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-mxsss" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.492264 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx"] Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.500299 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Feb 23 09:09:41 crc kubenswrapper[4834]: W0223 09:09:41.501423 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4ab9ada9_cbae_4592_b699_e3bf33c09a95.slice/crio-d17c4dfb72842923c3728232915339a731636d17c2386bbd5dd9e7ec9c60fdf4 WatchSource:0}: Error finding container d17c4dfb72842923c3728232915339a731636d17c2386bbd5dd9e7ec9c60fdf4: Status 404 returned error can't find the container with id d17c4dfb72842923c3728232915339a731636d17c2386bbd5dd9e7ec9c60fdf4 Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.502377 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.520804 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.537206 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.539561 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.564123 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.627021 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-registry-certificates\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.627073 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-bound-sa-token\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.627122 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jtzq9\" (UniqueName: \"kubernetes.io/projected/94bf75a0-18d7-4f14-8b4b-a9050ab2eab7-kube-api-access-jtzq9\") pod \"console-operator-58897d9998-bdng5\" (UID: \"94bf75a0-18d7-4f14-8b4b-a9050ab2eab7\") " pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.627148 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-registry-tls\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.627169 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/59d59459-2ab3-40d8-9cc4-f68e34377748-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-495q2\" (UID: \"59d59459-2ab3-40d8-9cc4-f68e34377748\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.627216 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.627191 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-52kzh\" (UniqueName: \"kubernetes.io/projected/59d59459-2ab3-40d8-9cc4-f68e34377748-kube-api-access-52kzh\") pod \"cluster-samples-operator-665b6dd947-495q2\" (UID: \"59d59459-2ab3-40d8-9cc4-f68e34377748\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.627724 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/383bf705-7d54-4400-9739-2b7c48b20ef8-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-75r79\" (UID: \"383bf705-7d54-4400-9739-2b7c48b20ef8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.627779 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vm7rl\" (UniqueName: \"kubernetes.io/projected/383bf705-7d54-4400-9739-2b7c48b20ef8-kube-api-access-vm7rl\") pod \"kube-storage-version-migrator-operator-b67b599dd-75r79\" (UID: \"383bf705-7d54-4400-9739-2b7c48b20ef8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.627849 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/94bf75a0-18d7-4f14-8b4b-a9050ab2eab7-serving-cert\") pod \"console-operator-58897d9998-bdng5\" (UID: \"94bf75a0-18d7-4f14-8b4b-a9050ab2eab7\") " pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.627889 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gghd7\" (UniqueName: \"kubernetes.io/projected/c76b76a1-92df-4e16-b72b-ae9f3d952c72-kube-api-access-gghd7\") pod \"control-plane-machine-set-operator-78cbb6b69f-k9lbb\" (UID: \"c76b76a1-92df-4e16-b72b-ae9f3d952c72\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k9lbb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.627988 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-ca-trust-extracted\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.628010 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-trusted-ca\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.629055 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gc2js\" (UniqueName: \"kubernetes.io/projected/5608eb40-e8c9-4701-85ee-68b1cbd4b79c-kube-api-access-gc2js\") pod \"migrator-59844c95c7-2wpmg\" (UID: \"5608eb40-e8c9-4701-85ee-68b1cbd4b79c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2wpmg" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.630608 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bf46\" (UniqueName: \"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-kube-api-access-9bf46\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.630685 4834 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94bf75a0-18d7-4f14-8b4b-a9050ab2eab7-config\") pod \"console-operator-58897d9998-bdng5\" (UID: \"94bf75a0-18d7-4f14-8b4b-a9050ab2eab7\") " pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.630907 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/94bf75a0-18d7-4f14-8b4b-a9050ab2eab7-trusted-ca\") pod \"console-operator-58897d9998-bdng5\" (UID: \"94bf75a0-18d7-4f14-8b4b-a9050ab2eab7\") " pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.630966 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/c76b76a1-92df-4e16-b72b-ae9f3d952c72-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-k9lbb\" (UID: \"c76b76a1-92df-4e16-b72b-ae9f3d952c72\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k9lbb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.631010 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.631075 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-installation-pull-secrets\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.631120 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/383bf705-7d54-4400-9739-2b7c48b20ef8-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-75r79\" (UID: \"383bf705-7d54-4400-9739-2b7c48b20ef8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79" Feb 23 09:09:41 crc kubenswrapper[4834]: E0223 09:09:41.631643 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.131630007 +0000 UTC m=+118.209944394 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.642307 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.731916 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:41 crc kubenswrapper[4834]: E0223 09:09:41.732045 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.232019838 +0000 UTC m=+118.310334235 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732234 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-registry-tls\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732277 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54jxz\" (UniqueName: \"kubernetes.io/projected/3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d-kube-api-access-54jxz\") pod \"cluster-image-registry-operator-dc59b4c8b-8pjt4\" (UID: \"3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732359 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vm7rl\" (UniqueName: \"kubernetes.io/projected/383bf705-7d54-4400-9739-2b7c48b20ef8-kube-api-access-vm7rl\") pod \"kube-storage-version-migrator-operator-b67b599dd-75r79\" (UID: \"383bf705-7d54-4400-9739-2b7c48b20ef8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732401 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/ad9b3cbe-a60c-43af-92e7-fb757f59162b-available-featuregates\") pod 
\"openshift-config-operator-7777fb866f-lzrvp\" (UID: \"ad9b3cbe-a60c-43af-92e7-fb757f59162b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732483 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/94bf75a0-18d7-4f14-8b4b-a9050ab2eab7-serving-cert\") pod \"console-operator-58897d9998-bdng5\" (UID: \"94bf75a0-18d7-4f14-8b4b-a9050ab2eab7\") " pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732529 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gghd7\" (UniqueName: \"kubernetes.io/projected/c76b76a1-92df-4e16-b72b-ae9f3d952c72-kube-api-access-gghd7\") pod \"control-plane-machine-set-operator-78cbb6b69f-k9lbb\" (UID: \"c76b76a1-92df-4e16-b72b-ae9f3d952c72\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k9lbb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732573 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1ef3d4b6-98be-4c13-99ee-6787dff39425-serving-cert\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732598 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-ca-trust-extracted\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732633 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gc2js\" (UniqueName: \"kubernetes.io/projected/5608eb40-e8c9-4701-85ee-68b1cbd4b79c-kube-api-access-gc2js\") pod \"migrator-59844c95c7-2wpmg\" (UID: \"5608eb40-e8c9-4701-85ee-68b1cbd4b79c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2wpmg" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732657 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ba759e0-a281-467d-aa5a-7ac7d97c67fc-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jfn2h\" (UID: \"2ba759e0-a281-467d-aa5a-7ac7d97c67fc\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732684 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-8pjt4\" (UID: \"3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732707 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94bf75a0-18d7-4f14-8b4b-a9050ab2eab7-config\") pod \"console-operator-58897d9998-bdng5\" (UID: 
\"94bf75a0-18d7-4f14-8b4b-a9050ab2eab7\") " pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732746 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf3d38f3-bb40-4318-bd61-19df5d4e3572-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-cwqg7\" (UID: \"bf3d38f3-bb40-4318-bd61-19df5d4e3572\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732776 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3998427e-dcd1-4c1e-ba32-cfacf3c9fa44-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-crc7p\" (UID: \"3998427e-dcd1-4c1e-ba32-cfacf3c9fa44\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732805 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732836 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-installation-pull-secrets\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732858 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/383bf705-7d54-4400-9739-2b7c48b20ef8-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-75r79\" (UID: \"383bf705-7d54-4400-9739-2b7c48b20ef8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732883 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bf3d38f3-bb40-4318-bd61-19df5d4e3572-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-cwqg7\" (UID: \"bf3d38f3-bb40-4318-bd61-19df5d4e3572\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732907 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad9b3cbe-a60c-43af-92e7-fb757f59162b-serving-cert\") pod \"openshift-config-operator-7777fb866f-lzrvp\" (UID: \"ad9b3cbe-a60c-43af-92e7-fb757f59162b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732973 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-registry-certificates\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.732998 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-bound-sa-token\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.733025 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ba759e0-a281-467d-aa5a-7ac7d97c67fc-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jfn2h\" (UID: \"2ba759e0-a281-467d-aa5a-7ac7d97c67fc\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.733051 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jtzq9\" (UniqueName: \"kubernetes.io/projected/94bf75a0-18d7-4f14-8b4b-a9050ab2eab7-kube-api-access-jtzq9\") pod \"console-operator-58897d9998-bdng5\" (UID: \"94bf75a0-18d7-4f14-8b4b-a9050ab2eab7\") " pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.733086 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1ef3d4b6-98be-4c13-99ee-6787dff39425-etcd-client\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.733112 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-8pjt4\" (UID: \"3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" Feb 23 09:09:41 crc kubenswrapper[4834]: E0223 09:09:41.733957 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.233945652 +0000 UTC m=+118.312260039 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.734509 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-ca-trust-extracted\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.734522 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/59d59459-2ab3-40d8-9cc4-f68e34377748-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-495q2\" (UID: \"59d59459-2ab3-40d8-9cc4-f68e34377748\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.734547 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-52kzh\" (UniqueName: \"kubernetes.io/projected/59d59459-2ab3-40d8-9cc4-f68e34377748-kube-api-access-52kzh\") pod \"cluster-samples-operator-665b6dd947-495q2\" (UID: \"59d59459-2ab3-40d8-9cc4-f68e34377748\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.734564 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkltm\" (UniqueName: \"kubernetes.io/projected/1ef3d4b6-98be-4c13-99ee-6787dff39425-kube-api-access-xkltm\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.734582 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/383bf705-7d54-4400-9739-2b7c48b20ef8-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-75r79\" (UID: \"383bf705-7d54-4400-9739-2b7c48b20ef8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.734612 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3998427e-dcd1-4c1e-ba32-cfacf3c9fa44-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-crc7p\" (UID: \"3998427e-dcd1-4c1e-ba32-cfacf3c9fa44\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.734668 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/1ef3d4b6-98be-4c13-99ee-6787dff39425-etcd-service-ca\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.734695 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/1ef3d4b6-98be-4c13-99ee-6787dff39425-etcd-ca\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.734711 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2srz9\" (UniqueName: \"kubernetes.io/projected/ad9b3cbe-a60c-43af-92e7-fb757f59162b-kube-api-access-2srz9\") pod \"openshift-config-operator-7777fb866f-lzrvp\" (UID: \"ad9b3cbe-a60c-43af-92e7-fb757f59162b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.736061 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/94bf75a0-18d7-4f14-8b4b-a9050ab2eab7-config\") pod \"console-operator-58897d9998-bdng5\" (UID: \"94bf75a0-18d7-4f14-8b4b-a9050ab2eab7\") " pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.736295 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-registry-certificates\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.738513 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/383bf705-7d54-4400-9739-2b7c48b20ef8-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-75r79\" (UID: \"383bf705-7d54-4400-9739-2b7c48b20ef8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.741302 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-8pjt4\" (UID: \"3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.741345 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf3d38f3-bb40-4318-bd61-19df5d4e3572-config\") pod \"kube-controller-manager-operator-78b949d7b-cwqg7\" (UID: \"bf3d38f3-bb40-4318-bd61-19df5d4e3572\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.742024 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-trusted-ca\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 
23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.742196 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ef3d4b6-98be-4c13-99ee-6787dff39425-config\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.742628 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bf46\" (UniqueName: \"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-kube-api-access-9bf46\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.744594 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3998427e-dcd1-4c1e-ba32-cfacf3c9fa44-config\") pod \"kube-apiserver-operator-766d6c64bb-crc7p\" (UID: \"3998427e-dcd1-4c1e-ba32-cfacf3c9fa44\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.744728 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-installation-pull-secrets\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.744888 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/94bf75a0-18d7-4f14-8b4b-a9050ab2eab7-trusted-ca\") pod \"console-operator-58897d9998-bdng5\" (UID: \"94bf75a0-18d7-4f14-8b4b-a9050ab2eab7\") " pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.745146 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/94bf75a0-18d7-4f14-8b4b-a9050ab2eab7-serving-cert\") pod \"console-operator-58897d9998-bdng5\" (UID: \"94bf75a0-18d7-4f14-8b4b-a9050ab2eab7\") " pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.745446 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/c76b76a1-92df-4e16-b72b-ae9f3d952c72-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-k9lbb\" (UID: \"c76b76a1-92df-4e16-b72b-ae9f3d952c72\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k9lbb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.745524 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ba759e0-a281-467d-aa5a-7ac7d97c67fc-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jfn2h\" (UID: \"2ba759e0-a281-467d-aa5a-7ac7d97c67fc\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.745900 4834 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/59d59459-2ab3-40d8-9cc4-f68e34377748-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-495q2\" (UID: \"59d59459-2ab3-40d8-9cc4-f68e34377748\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.746022 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/94bf75a0-18d7-4f14-8b4b-a9050ab2eab7-trusted-ca\") pod \"console-operator-58897d9998-bdng5\" (UID: \"94bf75a0-18d7-4f14-8b4b-a9050ab2eab7\") " pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.749532 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-registry-tls\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.750354 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/383bf705-7d54-4400-9739-2b7c48b20ef8-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-75r79\" (UID: \"383bf705-7d54-4400-9739-2b7c48b20ef8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.751497 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-trusted-ca\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.753471 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/c76b76a1-92df-4e16-b72b-ae9f3d952c72-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-k9lbb\" (UID: \"c76b76a1-92df-4e16-b72b-ae9f3d952c72\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k9lbb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.781944 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vm7rl\" (UniqueName: \"kubernetes.io/projected/383bf705-7d54-4400-9739-2b7c48b20ef8-kube-api-access-vm7rl\") pod \"kube-storage-version-migrator-operator-b67b599dd-75r79\" (UID: \"383bf705-7d54-4400-9739-2b7c48b20ef8\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.805113 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jtzq9\" (UniqueName: \"kubernetes.io/projected/94bf75a0-18d7-4f14-8b4b-a9050ab2eab7-kube-api-access-jtzq9\") pod \"console-operator-58897d9998-bdng5\" (UID: \"94bf75a0-18d7-4f14-8b4b-a9050ab2eab7\") " pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.813259 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-dns-operator/dns-operator-744455d44c-n6kdb"] Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.820210 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-bound-sa-token\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.846122 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gghd7\" (UniqueName: \"kubernetes.io/projected/c76b76a1-92df-4e16-b72b-ae9f3d952c72-kube-api-access-gghd7\") pod \"control-plane-machine-set-operator-78cbb6b69f-k9lbb\" (UID: \"c76b76a1-92df-4e16-b72b-ae9f3d952c72\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k9lbb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.846586 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.846894 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0829b4ef-0b9e-49c6-ba38-daafdd8332ce-proxy-tls\") pod \"machine-config-controller-84d6567774-6rmcx\" (UID: \"0829b4ef-0b9e-49c6-ba38-daafdd8332ce\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.846961 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/441917a1-296e-4529-a79f-458faf4769e6-ready\") pod \"cni-sysctl-allowlist-ds-rlmdc\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.846991 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b454f53-4bc6-4e49-a80d-2f31fe1dccb9-cert\") pod \"ingress-canary-ljtgg\" (UID: \"6b454f53-4bc6-4e49-a80d-2f31fe1dccb9\") " pod="openshift-ingress-canary/ingress-canary-ljtgg" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.847038 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/59fb1571-215c-49da-a6ae-3c2152ef19f6-secret-volume\") pod \"collect-profiles-29530620-fvlp9\" (UID: \"59fb1571-215c-49da-a6ae-3c2152ef19f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.847061 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hv4gk\" (UniqueName: \"kubernetes.io/projected/21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc-kube-api-access-hv4gk\") pod \"service-ca-9c57cc56f-hwvz9\" (UID: \"21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc\") " pod="openshift-service-ca/service-ca-9c57cc56f-hwvz9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.847155 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/4beb05d8-6d1a-488a-8ce4-e35d73b04e38-profile-collector-cert\") pod \"olm-operator-6b444d44fb-r2npz\" (UID: \"4beb05d8-6d1a-488a-8ce4-e35d73b04e38\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.847186 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a00a2272-643a-4144-aa4c-ff2d40639e8c-srv-cert\") pod \"catalog-operator-68c6474976-9skpv\" (UID: \"a00a2272-643a-4144-aa4c-ff2d40639e8c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.847230 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1ef3d4b6-98be-4c13-99ee-6787dff39425-serving-cert\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.847251 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/299c472a-f5bc-4330-a44e-82cb3490d9bd-serving-cert\") pod \"service-ca-operator-777779d784-c7c5g\" (UID: \"299c472a-f5bc-4330-a44e-82cb3490d9bd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.847272 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vxrl5\" (UniqueName: \"kubernetes.io/projected/a00a2272-643a-4144-aa4c-ff2d40639e8c-kube-api-access-vxrl5\") pod \"catalog-operator-68c6474976-9skpv\" (UID: \"a00a2272-643a-4144-aa4c-ff2d40639e8c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" Feb 23 09:09:41 crc kubenswrapper[4834]: E0223 09:09:41.847339 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.347307373 +0000 UTC m=+118.425621820 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.847791 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-socket-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.847830 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4thfs\" (UniqueName: \"kubernetes.io/projected/2c9de897-17f3-4444-ad95-b5e07b40f6c8-kube-api-access-4thfs\") pod \"marketplace-operator-79b997595-hlc9q\" (UID: \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.847853 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkmzp\" (UniqueName: \"kubernetes.io/projected/42d36f07-e27c-49e0-bad8-472e93d3875d-kube-api-access-lkmzp\") pod \"multus-admission-controller-857f4d67dd-m8kxw\" (UID: \"42d36f07-e27c-49e0-bad8-472e93d3875d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m8kxw" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.847880 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/4beb05d8-6d1a-488a-8ce4-e35d73b04e38-srv-cert\") pod \"olm-operator-6b444d44fb-r2npz\" (UID: \"4beb05d8-6d1a-488a-8ce4-e35d73b04e38\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.847905 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ba759e0-a281-467d-aa5a-7ac7d97c67fc-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jfn2h\" (UID: \"2ba759e0-a281-467d-aa5a-7ac7d97c67fc\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.848066 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5kdz\" (UniqueName: \"kubernetes.io/projected/59fb1571-215c-49da-a6ae-3c2152ef19f6-kube-api-access-q5kdz\") pod \"collect-profiles-29530620-fvlp9\" (UID: \"59fb1571-215c-49da-a6ae-3c2152ef19f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.848110 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-8pjt4\" (UID: \"3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" Feb 23 09:09:41 crc 
kubenswrapper[4834]: I0223 09:09:41.849541 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3998427e-dcd1-4c1e-ba32-cfacf3c9fa44-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-crc7p\" (UID: \"3998427e-dcd1-4c1e-ba32-cfacf3c9fa44\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.849582 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc-signing-cabundle\") pod \"service-ca-9c57cc56f-hwvz9\" (UID: \"21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc\") " pod="openshift-service-ca/service-ca-9c57cc56f-hwvz9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.849622 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf3d38f3-bb40-4318-bd61-19df5d4e3572-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-cwqg7\" (UID: \"bf3d38f3-bb40-4318-bd61-19df5d4e3572\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.849652 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.849694 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad9b3cbe-a60c-43af-92e7-fb757f59162b-serving-cert\") pod \"openshift-config-operator-7777fb866f-lzrvp\" (UID: \"ad9b3cbe-a60c-43af-92e7-fb757f59162b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.849743 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bf3d38f3-bb40-4318-bd61-19df5d4e3572-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-cwqg7\" (UID: \"bf3d38f3-bb40-4318-bd61-19df5d4e3572\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.849948 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btxmv\" (UniqueName: \"kubernetes.io/projected/441917a1-296e-4529-a79f-458faf4769e6-kube-api-access-btxmv\") pod \"cni-sysctl-allowlist-ds-rlmdc\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.849988 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/441917a1-296e-4529-a79f-458faf4769e6-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-rlmdc\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:41 crc kubenswrapper[4834]: E0223 09:09:41.850283 4834 
nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.350264885 +0000 UTC m=+118.428579272 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.851054 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ba759e0-a281-467d-aa5a-7ac7d97c67fc-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jfn2h\" (UID: \"2ba759e0-a281-467d-aa5a-7ac7d97c67fc\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852495 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1ef3d4b6-98be-4c13-99ee-6787dff39425-etcd-client\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852533 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-8pjt4\" (UID: \"3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852527 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1ef3d4b6-98be-4c13-99ee-6787dff39425-serving-cert\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852555 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/42d36f07-e27c-49e0-bad8-472e93d3875d-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-m8kxw\" (UID: \"42d36f07-e27c-49e0-bad8-472e93d3875d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m8kxw" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852596 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkltm\" (UniqueName: \"kubernetes.io/projected/1ef3d4b6-98be-4c13-99ee-6787dff39425-kube-api-access-xkltm\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852616 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nsbmq\" (UniqueName: 
\"kubernetes.io/projected/617a1fea-21a9-4cfd-81df-d0f3ebd52652-kube-api-access-nsbmq\") pod \"packageserver-d55dfcdfc-fg4xb\" (UID: \"617a1fea-21a9-4cfd-81df-d0f3ebd52652\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852638 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/9707583d-7687-4854-8501-eacac0abff04-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-vdn4d\" (UID: \"9707583d-7687-4854-8501-eacac0abff04\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852661 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0829b4ef-0b9e-49c6-ba38-daafdd8332ce-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-6rmcx\" (UID: \"0829b4ef-0b9e-49c6-ba38-daafdd8332ce\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852681 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/299c472a-f5bc-4330-a44e-82cb3490d9bd-config\") pod \"service-ca-operator-777779d784-c7c5g\" (UID: \"299c472a-f5bc-4330-a44e-82cb3490d9bd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852698 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3998427e-dcd1-4c1e-ba32-cfacf3c9fa44-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-crc7p\" (UID: \"3998427e-dcd1-4c1e-ba32-cfacf3c9fa44\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852713 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/617a1fea-21a9-4cfd-81df-d0f3ebd52652-tmpfs\") pod \"packageserver-d55dfcdfc-fg4xb\" (UID: \"617a1fea-21a9-4cfd-81df-d0f3ebd52652\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852741 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/1ef3d4b6-98be-4c13-99ee-6787dff39425-etcd-service-ca\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852758 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a2db3816-ea50-4e5b-ab5a-a4d306950672-certs\") pod \"machine-config-server-cd8np\" (UID: \"a2db3816-ea50-4e5b-ab5a-a4d306950672\") " pod="openshift-machine-config-operator/machine-config-server-cd8np" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852776 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: 
\"kubernetes.io/configmap/1ef3d4b6-98be-4c13-99ee-6787dff39425-etcd-ca\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852795 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2srz9\" (UniqueName: \"kubernetes.io/projected/ad9b3cbe-a60c-43af-92e7-fb757f59162b-kube-api-access-2srz9\") pod \"openshift-config-operator-7777fb866f-lzrvp\" (UID: \"ad9b3cbe-a60c-43af-92e7-fb757f59162b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852813 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-registration-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852835 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2c9de897-17f3-4444-ad95-b5e07b40f6c8-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-hlc9q\" (UID: \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.851512 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-mxsss"] Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852872 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2e300c80-7530-4d72-acd8-4bfdca03c327-metrics-tls\") pod \"dns-default-c9zqb\" (UID: \"2e300c80-7530-4d72-acd8-4bfdca03c327\") " pod="openshift-dns/dns-default-c9zqb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852966 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8fba4843-d9f5-4156-8d0b-d0e764be8942-images\") pod \"machine-config-operator-74547568cd-5kkqj\" (UID: \"8fba4843-d9f5-4156-8d0b-d0e764be8942\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.852991 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/59fb1571-215c-49da-a6ae-3c2152ef19f6-config-volume\") pod \"collect-profiles-29530620-fvlp9\" (UID: \"59fb1571-215c-49da-a6ae-3c2152ef19f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.853016 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a00a2272-643a-4144-aa4c-ff2d40639e8c-profile-collector-cert\") pod \"catalog-operator-68c6474976-9skpv\" (UID: \"a00a2272-643a-4144-aa4c-ff2d40639e8c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.853036 4834 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdk6h\" (UniqueName: \"kubernetes.io/projected/4beb05d8-6d1a-488a-8ce4-e35d73b04e38-kube-api-access-mdk6h\") pod \"olm-operator-6b444d44fb-r2npz\" (UID: \"4beb05d8-6d1a-488a-8ce4-e35d73b04e38\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.853057 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/617a1fea-21a9-4cfd-81df-d0f3ebd52652-webhook-cert\") pod \"packageserver-d55dfcdfc-fg4xb\" (UID: \"617a1fea-21a9-4cfd-81df-d0f3ebd52652\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.853101 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-8pjt4\" (UID: \"3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.853119 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf3d38f3-bb40-4318-bd61-19df5d4e3572-config\") pod \"kube-controller-manager-operator-78b949d7b-cwqg7\" (UID: \"bf3d38f3-bb40-4318-bd61-19df5d4e3572\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.853137 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-csi-data-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.853175 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ef3d4b6-98be-4c13-99ee-6787dff39425-config\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.853200 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8fba4843-d9f5-4156-8d0b-d0e764be8942-auth-proxy-config\") pod \"machine-config-operator-74547568cd-5kkqj\" (UID: \"8fba4843-d9f5-4156-8d0b-d0e764be8942\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.853269 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcjz9\" (UniqueName: \"kubernetes.io/projected/2e300c80-7530-4d72-acd8-4bfdca03c327-kube-api-access-mcjz9\") pod \"dns-default-c9zqb\" (UID: \"2e300c80-7530-4d72-acd8-4bfdca03c327\") " pod="openshift-dns/dns-default-c9zqb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.853288 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" 
(UniqueName: \"kubernetes.io/configmap/441917a1-296e-4529-a79f-458faf4769e6-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-rlmdc\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.853309 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tjx4v\" (UniqueName: \"kubernetes.io/projected/0829b4ef-0b9e-49c6-ba38-daafdd8332ce-kube-api-access-tjx4v\") pod \"machine-config-controller-84d6567774-6rmcx\" (UID: \"0829b4ef-0b9e-49c6-ba38-daafdd8332ce\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.853281 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-8pjt4\" (UID: \"3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.853341 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3998427e-dcd1-4c1e-ba32-cfacf3c9fa44-config\") pod \"kube-apiserver-operator-766d6c64bb-crc7p\" (UID: \"3998427e-dcd1-4c1e-ba32-cfacf3c9fa44\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p" Feb 23 09:09:41 crc kubenswrapper[4834]: W0223 09:09:41.854328 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfbca2b49_c933_408c_9c80_fb1202bfb6f1.slice/crio-bbc0664f2c28c6957ddbb7600f66fe0bc591f13fd64d49f2b3f07e39df868539 WatchSource:0}: Error finding container bbc0664f2c28c6957ddbb7600f66fe0bc591f13fd64d49f2b3f07e39df868539: Status 404 returned error can't find the container with id bbc0664f2c28c6957ddbb7600f66fe0bc591f13fd64d49f2b3f07e39df868539 Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.854568 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3998427e-dcd1-4c1e-ba32-cfacf3c9fa44-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-crc7p\" (UID: \"3998427e-dcd1-4c1e-ba32-cfacf3c9fa44\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.854936 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2c9de897-17f3-4444-ad95-b5e07b40f6c8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-hlc9q\" (UID: \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.854977 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqpbq\" (UniqueName: \"kubernetes.io/projected/299c472a-f5bc-4330-a44e-82cb3490d9bd-kube-api-access-lqpbq\") pod \"service-ca-operator-777779d784-c7c5g\" (UID: \"299c472a-f5bc-4330-a44e-82cb3490d9bd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.854995 4834 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a2db3816-ea50-4e5b-ab5a-a4d306950672-node-bootstrap-token\") pod \"machine-config-server-cd8np\" (UID: \"a2db3816-ea50-4e5b-ab5a-a4d306950672\") " pod="openshift-machine-config-operator/machine-config-server-cd8np" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855029 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qfwxn\" (UniqueName: \"kubernetes.io/projected/6b454f53-4bc6-4e49-a80d-2f31fe1dccb9-kube-api-access-qfwxn\") pod \"ingress-canary-ljtgg\" (UID: \"6b454f53-4bc6-4e49-a80d-2f31fe1dccb9\") " pod="openshift-ingress-canary/ingress-canary-ljtgg" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855062 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ba759e0-a281-467d-aa5a-7ac7d97c67fc-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jfn2h\" (UID: \"2ba759e0-a281-467d-aa5a-7ac7d97c67fc\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855082 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8fba4843-d9f5-4156-8d0b-d0e764be8942-proxy-tls\") pod \"machine-config-operator-74547568cd-5kkqj\" (UID: \"8fba4843-d9f5-4156-8d0b-d0e764be8942\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855101 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v6q66\" (UniqueName: \"kubernetes.io/projected/9707583d-7687-4854-8501-eacac0abff04-kube-api-access-v6q66\") pod \"package-server-manager-789f6589d5-vdn4d\" (UID: \"9707583d-7687-4854-8501-eacac0abff04\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855143 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fp6z\" (UniqueName: \"kubernetes.io/projected/8fba4843-d9f5-4156-8d0b-d0e764be8942-kube-api-access-7fp6z\") pod \"machine-config-operator-74547568cd-5kkqj\" (UID: \"8fba4843-d9f5-4156-8d0b-d0e764be8942\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855160 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-plugins-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855178 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/1ef3d4b6-98be-4c13-99ee-6787dff39425-etcd-service-ca\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855196 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"kube-api-access-7lb6z\" (UniqueName: \"kubernetes.io/projected/83a0c203-a9b8-462a-ba20-536641aa0721-kube-api-access-7lb6z\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855214 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2e300c80-7530-4d72-acd8-4bfdca03c327-config-volume\") pod \"dns-default-c9zqb\" (UID: \"2e300c80-7530-4d72-acd8-4bfdca03c327\") " pod="openshift-dns/dns-default-c9zqb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855339 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3998427e-dcd1-4c1e-ba32-cfacf3c9fa44-config\") pod \"kube-apiserver-operator-766d6c64bb-crc7p\" (UID: \"3998427e-dcd1-4c1e-ba32-cfacf3c9fa44\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855480 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-mountpoint-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855508 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2ba759e0-a281-467d-aa5a-7ac7d97c67fc-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jfn2h\" (UID: \"2ba759e0-a281-467d-aa5a-7ac7d97c67fc\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855562 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54jxz\" (UniqueName: \"kubernetes.io/projected/3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d-kube-api-access-54jxz\") pod \"cluster-image-registry-operator-dc59b4c8b-8pjt4\" (UID: \"3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855666 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/617a1fea-21a9-4cfd-81df-d0f3ebd52652-apiservice-cert\") pod \"packageserver-d55dfcdfc-fg4xb\" (UID: \"617a1fea-21a9-4cfd-81df-d0f3ebd52652\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.855816 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc-signing-key\") pod \"service-ca-9c57cc56f-hwvz9\" (UID: \"21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc\") " pod="openshift-service-ca/service-ca-9c57cc56f-hwvz9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.856179 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/1ef3d4b6-98be-4c13-99ee-6787dff39425-etcd-ca\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " 
pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.856213 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/ad9b3cbe-a60c-43af-92e7-fb757f59162b-available-featuregates\") pod \"openshift-config-operator-7777fb866f-lzrvp\" (UID: \"ad9b3cbe-a60c-43af-92e7-fb757f59162b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.856244 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zplh8\" (UniqueName: \"kubernetes.io/projected/a2db3816-ea50-4e5b-ab5a-a4d306950672-kube-api-access-zplh8\") pod \"machine-config-server-cd8np\" (UID: \"a2db3816-ea50-4e5b-ab5a-a4d306950672\") " pod="openshift-machine-config-operator/machine-config-server-cd8np" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.857003 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ad9b3cbe-a60c-43af-92e7-fb757f59162b-serving-cert\") pod \"openshift-config-operator-7777fb866f-lzrvp\" (UID: \"ad9b3cbe-a60c-43af-92e7-fb757f59162b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.857655 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-8pjt4\" (UID: \"3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.857847 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1ef3d4b6-98be-4c13-99ee-6787dff39425-config\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.857956 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/ad9b3cbe-a60c-43af-92e7-fb757f59162b-available-featuregates\") pod \"openshift-config-operator-7777fb866f-lzrvp\" (UID: \"ad9b3cbe-a60c-43af-92e7-fb757f59162b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.857987 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf3d38f3-bb40-4318-bd61-19df5d4e3572-config\") pod \"kube-controller-manager-operator-78b949d7b-cwqg7\" (UID: \"bf3d38f3-bb40-4318-bd61-19df5d4e3572\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.859563 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj"] Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.859589 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf3d38f3-bb40-4318-bd61-19df5d4e3572-serving-cert\") pod 
\"kube-controller-manager-operator-78b949d7b-cwqg7\" (UID: \"bf3d38f3-bb40-4318-bd61-19df5d4e3572\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.861639 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2ba759e0-a281-467d-aa5a-7ac7d97c67fc-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jfn2h\" (UID: \"2ba759e0-a281-467d-aa5a-7ac7d97c67fc\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.863676 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gc2js\" (UniqueName: \"kubernetes.io/projected/5608eb40-e8c9-4701-85ee-68b1cbd4b79c-kube-api-access-gc2js\") pod \"migrator-59844c95c7-2wpmg\" (UID: \"5608eb40-e8c9-4701-85ee-68b1cbd4b79c\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2wpmg" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.864683 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1ef3d4b6-98be-4c13-99ee-6787dff39425-etcd-client\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.877666 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-52kzh\" (UniqueName: \"kubernetes.io/projected/59d59459-2ab3-40d8-9cc4-f68e34377748-kube-api-access-52kzh\") pod \"cluster-samples-operator-665b6dd947-495q2\" (UID: \"59d59459-2ab3-40d8-9cc4-f68e34377748\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.884185 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.892742 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2wpmg" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.897587 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bf46\" (UniqueName: \"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-kube-api-access-9bf46\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.911764 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k9lbb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.925707 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.937304 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/2ba759e0-a281-467d-aa5a-7ac7d97c67fc-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-jfn2h\" (UID: \"2ba759e0-a281-467d-aa5a-7ac7d97c67fc\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.956860 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bf3d38f3-bb40-4318-bd61-19df5d4e3572-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-cwqg7\" (UID: \"bf3d38f3-bb40-4318-bd61-19df5d4e3572\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.957986 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:41 crc kubenswrapper[4834]: E0223 09:09:41.958217 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.458185515 +0000 UTC m=+118.536499902 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958284 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqpbq\" (UniqueName: \"kubernetes.io/projected/299c472a-f5bc-4330-a44e-82cb3490d9bd-kube-api-access-lqpbq\") pod \"service-ca-operator-777779d784-c7c5g\" (UID: \"299c472a-f5bc-4330-a44e-82cb3490d9bd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958338 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a2db3816-ea50-4e5b-ab5a-a4d306950672-node-bootstrap-token\") pod \"machine-config-server-cd8np\" (UID: \"a2db3816-ea50-4e5b-ab5a-a4d306950672\") " pod="openshift-machine-config-operator/machine-config-server-cd8np" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958369 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qfwxn\" (UniqueName: \"kubernetes.io/projected/6b454f53-4bc6-4e49-a80d-2f31fe1dccb9-kube-api-access-qfwxn\") pod \"ingress-canary-ljtgg\" (UID: \"6b454f53-4bc6-4e49-a80d-2f31fe1dccb9\") " pod="openshift-ingress-canary/ingress-canary-ljtgg" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958417 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8fba4843-d9f5-4156-8d0b-d0e764be8942-proxy-tls\") pod \"machine-config-operator-74547568cd-5kkqj\" (UID: \"8fba4843-d9f5-4156-8d0b-d0e764be8942\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958447 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-plugins-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958469 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v6q66\" (UniqueName: \"kubernetes.io/projected/9707583d-7687-4854-8501-eacac0abff04-kube-api-access-v6q66\") pod \"package-server-manager-789f6589d5-vdn4d\" (UID: \"9707583d-7687-4854-8501-eacac0abff04\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958493 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fp6z\" (UniqueName: \"kubernetes.io/projected/8fba4843-d9f5-4156-8d0b-d0e764be8942-kube-api-access-7fp6z\") pod \"machine-config-operator-74547568cd-5kkqj\" (UID: \"8fba4843-d9f5-4156-8d0b-d0e764be8942\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958526 4834 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"kube-api-access-7lb6z\" (UniqueName: \"kubernetes.io/projected/83a0c203-a9b8-462a-ba20-536641aa0721-kube-api-access-7lb6z\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958548 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2e300c80-7530-4d72-acd8-4bfdca03c327-config-volume\") pod \"dns-default-c9zqb\" (UID: \"2e300c80-7530-4d72-acd8-4bfdca03c327\") " pod="openshift-dns/dns-default-c9zqb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958570 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-mountpoint-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958640 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/617a1fea-21a9-4cfd-81df-d0f3ebd52652-apiservice-cert\") pod \"packageserver-d55dfcdfc-fg4xb\" (UID: \"617a1fea-21a9-4cfd-81df-d0f3ebd52652\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958666 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc-signing-key\") pod \"service-ca-9c57cc56f-hwvz9\" (UID: \"21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc\") " pod="openshift-service-ca/service-ca-9c57cc56f-hwvz9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958699 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zplh8\" (UniqueName: \"kubernetes.io/projected/a2db3816-ea50-4e5b-ab5a-a4d306950672-kube-api-access-zplh8\") pod \"machine-config-server-cd8np\" (UID: \"a2db3816-ea50-4e5b-ab5a-a4d306950672\") " pod="openshift-machine-config-operator/machine-config-server-cd8np" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958728 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0829b4ef-0b9e-49c6-ba38-daafdd8332ce-proxy-tls\") pod \"machine-config-controller-84d6567774-6rmcx\" (UID: \"0829b4ef-0b9e-49c6-ba38-daafdd8332ce\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958750 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/441917a1-296e-4529-a79f-458faf4769e6-ready\") pod \"cni-sysctl-allowlist-ds-rlmdc\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958768 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b454f53-4bc6-4e49-a80d-2f31fe1dccb9-cert\") pod \"ingress-canary-ljtgg\" (UID: \"6b454f53-4bc6-4e49-a80d-2f31fe1dccb9\") " pod="openshift-ingress-canary/ingress-canary-ljtgg" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958788 4834 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/59fb1571-215c-49da-a6ae-3c2152ef19f6-secret-volume\") pod \"collect-profiles-29530620-fvlp9\" (UID: \"59fb1571-215c-49da-a6ae-3c2152ef19f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958805 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hv4gk\" (UniqueName: \"kubernetes.io/projected/21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc-kube-api-access-hv4gk\") pod \"service-ca-9c57cc56f-hwvz9\" (UID: \"21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc\") " pod="openshift-service-ca/service-ca-9c57cc56f-hwvz9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958823 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/4beb05d8-6d1a-488a-8ce4-e35d73b04e38-profile-collector-cert\") pod \"olm-operator-6b444d44fb-r2npz\" (UID: \"4beb05d8-6d1a-488a-8ce4-e35d73b04e38\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958843 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a00a2272-643a-4144-aa4c-ff2d40639e8c-srv-cert\") pod \"catalog-operator-68c6474976-9skpv\" (UID: \"a00a2272-643a-4144-aa4c-ff2d40639e8c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958866 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/299c472a-f5bc-4330-a44e-82cb3490d9bd-serving-cert\") pod \"service-ca-operator-777779d784-c7c5g\" (UID: \"299c472a-f5bc-4330-a44e-82cb3490d9bd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958884 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vxrl5\" (UniqueName: \"kubernetes.io/projected/a00a2272-643a-4144-aa4c-ff2d40639e8c-kube-api-access-vxrl5\") pod \"catalog-operator-68c6474976-9skpv\" (UID: \"a00a2272-643a-4144-aa4c-ff2d40639e8c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958905 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-socket-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958932 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4thfs\" (UniqueName: \"kubernetes.io/projected/2c9de897-17f3-4444-ad95-b5e07b40f6c8-kube-api-access-4thfs\") pod \"marketplace-operator-79b997595-hlc9q\" (UID: \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958956 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkmzp\" (UniqueName: \"kubernetes.io/projected/42d36f07-e27c-49e0-bad8-472e93d3875d-kube-api-access-lkmzp\") pod 
\"multus-admission-controller-857f4d67dd-m8kxw\" (UID: \"42d36f07-e27c-49e0-bad8-472e93d3875d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m8kxw" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958981 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/4beb05d8-6d1a-488a-8ce4-e35d73b04e38-srv-cert\") pod \"olm-operator-6b444d44fb-r2npz\" (UID: \"4beb05d8-6d1a-488a-8ce4-e35d73b04e38\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959008 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5kdz\" (UniqueName: \"kubernetes.io/projected/59fb1571-215c-49da-a6ae-3c2152ef19f6-kube-api-access-q5kdz\") pod \"collect-profiles-29530620-fvlp9\" (UID: \"59fb1571-215c-49da-a6ae-3c2152ef19f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959041 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc-signing-cabundle\") pod \"service-ca-9c57cc56f-hwvz9\" (UID: \"21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc\") " pod="openshift-service-ca/service-ca-9c57cc56f-hwvz9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959071 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959106 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btxmv\" (UniqueName: \"kubernetes.io/projected/441917a1-296e-4529-a79f-458faf4769e6-kube-api-access-btxmv\") pod \"cni-sysctl-allowlist-ds-rlmdc\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959116 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-mountpoint-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959131 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/441917a1-296e-4529-a79f-458faf4769e6-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-rlmdc\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959168 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/42d36f07-e27c-49e0-bad8-472e93d3875d-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-m8kxw\" (UID: \"42d36f07-e27c-49e0-bad8-472e93d3875d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m8kxw" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959199 4834 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nsbmq\" (UniqueName: \"kubernetes.io/projected/617a1fea-21a9-4cfd-81df-d0f3ebd52652-kube-api-access-nsbmq\") pod \"packageserver-d55dfcdfc-fg4xb\" (UID: \"617a1fea-21a9-4cfd-81df-d0f3ebd52652\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959225 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/299c472a-f5bc-4330-a44e-82cb3490d9bd-config\") pod \"service-ca-operator-777779d784-c7c5g\" (UID: \"299c472a-f5bc-4330-a44e-82cb3490d9bd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959230 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2e300c80-7530-4d72-acd8-4bfdca03c327-config-volume\") pod \"dns-default-c9zqb\" (UID: \"2e300c80-7530-4d72-acd8-4bfdca03c327\") " pod="openshift-dns/dns-default-c9zqb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959251 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/9707583d-7687-4854-8501-eacac0abff04-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-vdn4d\" (UID: \"9707583d-7687-4854-8501-eacac0abff04\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959279 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0829b4ef-0b9e-49c6-ba38-daafdd8332ce-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-6rmcx\" (UID: \"0829b4ef-0b9e-49c6-ba38-daafdd8332ce\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959317 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/617a1fea-21a9-4cfd-81df-d0f3ebd52652-tmpfs\") pod \"packageserver-d55dfcdfc-fg4xb\" (UID: \"617a1fea-21a9-4cfd-81df-d0f3ebd52652\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959349 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a2db3816-ea50-4e5b-ab5a-a4d306950672-certs\") pod \"machine-config-server-cd8np\" (UID: \"a2db3816-ea50-4e5b-ab5a-a4d306950672\") " pod="openshift-machine-config-operator/machine-config-server-cd8np" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959397 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-registration-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959443 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2c9de897-17f3-4444-ad95-b5e07b40f6c8-marketplace-operator-metrics\") pod 
\"marketplace-operator-79b997595-hlc9q\" (UID: \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959467 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2e300c80-7530-4d72-acd8-4bfdca03c327-metrics-tls\") pod \"dns-default-c9zqb\" (UID: \"2e300c80-7530-4d72-acd8-4bfdca03c327\") " pod="openshift-dns/dns-default-c9zqb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959489 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/59fb1571-215c-49da-a6ae-3c2152ef19f6-config-volume\") pod \"collect-profiles-29530620-fvlp9\" (UID: \"59fb1571-215c-49da-a6ae-3c2152ef19f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959510 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8fba4843-d9f5-4156-8d0b-d0e764be8942-images\") pod \"machine-config-operator-74547568cd-5kkqj\" (UID: \"8fba4843-d9f5-4156-8d0b-d0e764be8942\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959531 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a00a2272-643a-4144-aa4c-ff2d40639e8c-profile-collector-cert\") pod \"catalog-operator-68c6474976-9skpv\" (UID: \"a00a2272-643a-4144-aa4c-ff2d40639e8c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959551 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdk6h\" (UniqueName: \"kubernetes.io/projected/4beb05d8-6d1a-488a-8ce4-e35d73b04e38-kube-api-access-mdk6h\") pod \"olm-operator-6b444d44fb-r2npz\" (UID: \"4beb05d8-6d1a-488a-8ce4-e35d73b04e38\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959575 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/617a1fea-21a9-4cfd-81df-d0f3ebd52652-webhook-cert\") pod \"packageserver-d55dfcdfc-fg4xb\" (UID: \"617a1fea-21a9-4cfd-81df-d0f3ebd52652\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959598 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-csi-data-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959627 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8fba4843-d9f5-4156-8d0b-d0e764be8942-auth-proxy-config\") pod \"machine-config-operator-74547568cd-5kkqj\" (UID: \"8fba4843-d9f5-4156-8d0b-d0e764be8942\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959656 4834 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcjz9\" (UniqueName: \"kubernetes.io/projected/2e300c80-7530-4d72-acd8-4bfdca03c327-kube-api-access-mcjz9\") pod \"dns-default-c9zqb\" (UID: \"2e300c80-7530-4d72-acd8-4bfdca03c327\") " pod="openshift-dns/dns-default-c9zqb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959676 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/441917a1-296e-4529-a79f-458faf4769e6-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-rlmdc\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959702 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tjx4v\" (UniqueName: \"kubernetes.io/projected/0829b4ef-0b9e-49c6-ba38-daafdd8332ce-kube-api-access-tjx4v\") pod \"machine-config-controller-84d6567774-6rmcx\" (UID: \"0829b4ef-0b9e-49c6-ba38-daafdd8332ce\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.959766 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2c9de897-17f3-4444-ad95-b5e07b40f6c8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-hlc9q\" (UID: \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.961144 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-csi-data-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.961215 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2c9de897-17f3-4444-ad95-b5e07b40f6c8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-hlc9q\" (UID: \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.961371 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/299c472a-f5bc-4330-a44e-82cb3490d9bd-config\") pod \"service-ca-operator-777779d784-c7c5g\" (UID: \"299c472a-f5bc-4330-a44e-82cb3490d9bd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.961894 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/441917a1-296e-4529-a79f-458faf4769e6-ready\") pod \"cni-sysctl-allowlist-ds-rlmdc\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.962995 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/8fba4843-d9f5-4156-8d0b-d0e764be8942-proxy-tls\") pod \"machine-config-operator-74547568cd-5kkqj\" (UID: \"8fba4843-d9f5-4156-8d0b-d0e764be8942\") " 
pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.963436 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-registration-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.963519 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/441917a1-296e-4529-a79f-458faf4769e6-cni-sysctl-allowlist\") pod \"cni-sysctl-allowlist-ds-rlmdc\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.963929 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0829b4ef-0b9e-49c6-ba38-daafdd8332ce-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-6rmcx\" (UID: \"0829b4ef-0b9e-49c6-ba38-daafdd8332ce\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.964050 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/617a1fea-21a9-4cfd-81df-d0f3ebd52652-tmpfs\") pod \"packageserver-d55dfcdfc-fg4xb\" (UID: \"617a1fea-21a9-4cfd-81df-d0f3ebd52652\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.965515 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc-signing-cabundle\") pod \"service-ca-9c57cc56f-hwvz9\" (UID: \"21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc\") " pod="openshift-service-ca/service-ca-9c57cc56f-hwvz9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.965597 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/a2db3816-ea50-4e5b-ab5a-a4d306950672-node-bootstrap-token\") pod \"machine-config-server-cd8np\" (UID: \"a2db3816-ea50-4e5b-ab5a-a4d306950672\") " pod="openshift-machine-config-operator/machine-config-server-cd8np" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.965645 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/59fb1571-215c-49da-a6ae-3c2152ef19f6-secret-volume\") pod \"collect-profiles-29530620-fvlp9\" (UID: \"59fb1571-215c-49da-a6ae-3c2152ef19f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.965998 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/8fba4843-d9f5-4156-8d0b-d0e764be8942-auth-proxy-config\") pod \"machine-config-operator-74547568cd-5kkqj\" (UID: \"8fba4843-d9f5-4156-8d0b-d0e764be8942\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" Feb 23 09:09:41 crc kubenswrapper[4834]: E0223 09:09:41.966004 4834 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.465984782 +0000 UTC m=+118.544299249 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.966054 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-socket-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.958960 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/83a0c203-a9b8-462a-ba20-536641aa0721-plugins-dir\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.968741 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/441917a1-296e-4529-a79f-458faf4769e6-tuning-conf-dir\") pod \"cni-sysctl-allowlist-ds-rlmdc\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.969447 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/8fba4843-d9f5-4156-8d0b-d0e764be8942-images\") pod \"machine-config-operator-74547568cd-5kkqj\" (UID: \"8fba4843-d9f5-4156-8d0b-d0e764be8942\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.969649 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/59fb1571-215c-49da-a6ae-3c2152ef19f6-config-volume\") pod \"collect-profiles-29530620-fvlp9\" (UID: \"59fb1571-215c-49da-a6ae-3c2152ef19f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.970914 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/4beb05d8-6d1a-488a-8ce4-e35d73b04e38-profile-collector-cert\") pod \"olm-operator-6b444d44fb-r2npz\" (UID: \"4beb05d8-6d1a-488a-8ce4-e35d73b04e38\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.971757 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2c9de897-17f3-4444-ad95-b5e07b40f6c8-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-hlc9q\" (UID: \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:09:41 crc 
kubenswrapper[4834]: I0223 09:09:41.971759 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc-signing-key\") pod \"service-ca-9c57cc56f-hwvz9\" (UID: \"21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc\") " pod="openshift-service-ca/service-ca-9c57cc56f-hwvz9" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.971943 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6b454f53-4bc6-4e49-a80d-2f31fe1dccb9-cert\") pod \"ingress-canary-ljtgg\" (UID: \"6b454f53-4bc6-4e49-a80d-2f31fe1dccb9\") " pod="openshift-ingress-canary/ingress-canary-ljtgg" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.972214 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/617a1fea-21a9-4cfd-81df-d0f3ebd52652-webhook-cert\") pod \"packageserver-d55dfcdfc-fg4xb\" (UID: \"617a1fea-21a9-4cfd-81df-d0f3ebd52652\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.972320 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2e300c80-7530-4d72-acd8-4bfdca03c327-metrics-tls\") pod \"dns-default-c9zqb\" (UID: \"2e300c80-7530-4d72-acd8-4bfdca03c327\") " pod="openshift-dns/dns-default-c9zqb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.972659 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/9707583d-7687-4854-8501-eacac0abff04-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-vdn4d\" (UID: \"9707583d-7687-4854-8501-eacac0abff04\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.976284 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/617a1fea-21a9-4cfd-81df-d0f3ebd52652-apiservice-cert\") pod \"packageserver-d55dfcdfc-fg4xb\" (UID: \"617a1fea-21a9-4cfd-81df-d0f3ebd52652\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.976564 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/299c472a-f5bc-4330-a44e-82cb3490d9bd-serving-cert\") pod \"service-ca-operator-777779d784-c7c5g\" (UID: \"299c472a-f5bc-4330-a44e-82cb3490d9bd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.976842 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/a00a2272-643a-4144-aa4c-ff2d40639e8c-srv-cert\") pod \"catalog-operator-68c6474976-9skpv\" (UID: \"a00a2272-643a-4144-aa4c-ff2d40639e8c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.977817 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/4beb05d8-6d1a-488a-8ce4-e35d73b04e38-srv-cert\") pod \"olm-operator-6b444d44fb-r2npz\" (UID: \"4beb05d8-6d1a-488a-8ce4-e35d73b04e38\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.978055 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0829b4ef-0b9e-49c6-ba38-daafdd8332ce-proxy-tls\") pod \"machine-config-controller-84d6567774-6rmcx\" (UID: \"0829b4ef-0b9e-49c6-ba38-daafdd8332ce\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.978504 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/a00a2272-643a-4144-aa4c-ff2d40639e8c-profile-collector-cert\") pod \"catalog-operator-68c6474976-9skpv\" (UID: \"a00a2272-643a-4144-aa4c-ff2d40639e8c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.983783 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/42d36f07-e27c-49e0-bad8-472e93d3875d-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-m8kxw\" (UID: \"42d36f07-e27c-49e0-bad8-472e93d3875d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m8kxw" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.984458 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/a2db3816-ea50-4e5b-ab5a-a4d306950672-certs\") pod \"machine-config-server-cd8np\" (UID: \"a2db3816-ea50-4e5b-ab5a-a4d306950672\") " pod="openshift-machine-config-operator/machine-config-server-cd8np" Feb 23 09:09:41 crc kubenswrapper[4834]: I0223 09:09:41.985353 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3998427e-dcd1-4c1e-ba32-cfacf3c9fa44-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-crc7p\" (UID: \"3998427e-dcd1-4c1e-ba32-cfacf3c9fa44\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.002294 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2srz9\" (UniqueName: \"kubernetes.io/projected/ad9b3cbe-a60c-43af-92e7-fb757f59162b-kube-api-access-2srz9\") pod \"openshift-config-operator-7777fb866f-lzrvp\" (UID: \"ad9b3cbe-a60c-43af-92e7-fb757f59162b\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.018224 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkltm\" (UniqueName: \"kubernetes.io/projected/1ef3d4b6-98be-4c13-99ee-6787dff39425-kube-api-access-xkltm\") pod \"etcd-operator-b45778765-xzc7c\" (UID: \"1ef3d4b6-98be-4c13-99ee-6787dff39425\") " pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.048549 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.052379 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-8pjt4\" (UID: \"3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.061303 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:42 crc kubenswrapper[4834]: E0223 09:09:42.062069 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.562038283 +0000 UTC m=+118.640352660 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.062694 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:42 crc kubenswrapper[4834]: E0223 09:09:42.065232 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.565205551 +0000 UTC m=+118.643519938 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.066639 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54jxz\" (UniqueName: \"kubernetes.io/projected/3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d-kube-api-access-54jxz\") pod \"cluster-image-registry-operator-dc59b4c8b-8pjt4\" (UID: \"3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.080040 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.090154 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.111013 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqpbq\" (UniqueName: \"kubernetes.io/projected/299c472a-f5bc-4330-a44e-82cb3490d9bd-kube-api-access-lqpbq\") pod \"service-ca-operator-777779d784-c7c5g\" (UID: \"299c472a-f5bc-4330-a44e-82cb3490d9bd\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.127998 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qfwxn\" (UniqueName: \"kubernetes.io/projected/6b454f53-4bc6-4e49-a80d-2f31fe1dccb9-kube-api-access-qfwxn\") pod \"ingress-canary-ljtgg\" (UID: \"6b454f53-4bc6-4e49-a80d-2f31fe1dccb9\") " pod="openshift-ingress-canary/ingress-canary-ljtgg" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.128301 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.132226 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-bdng5"] Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.148294 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fp6z\" (UniqueName: \"kubernetes.io/projected/8fba4843-d9f5-4156-8d0b-d0e764be8942-kube-api-access-7fp6z\") pod \"machine-config-operator-74547568cd-5kkqj\" (UID: \"8fba4843-d9f5-4156-8d0b-d0e764be8942\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.159151 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.162924 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v6q66\" (UniqueName: \"kubernetes.io/projected/9707583d-7687-4854-8501-eacac0abff04-kube-api-access-v6q66\") pod \"package-server-manager-789f6589d5-vdn4d\" (UID: \"9707583d-7687-4854-8501-eacac0abff04\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.166553 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:42 crc kubenswrapper[4834]: E0223 09:09:42.167138 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.667111685 +0000 UTC m=+118.745426072 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.169226 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.179827 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hv4gk\" (UniqueName: \"kubernetes.io/projected/21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc-kube-api-access-hv4gk\") pod \"service-ca-9c57cc56f-hwvz9\" (UID: \"21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc\") " pod="openshift-service-ca/service-ca-9c57cc56f-hwvz9" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.189654 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-2wpmg"] Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.198917 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7lb6z\" (UniqueName: \"kubernetes.io/projected/83a0c203-a9b8-462a-ba20-536641aa0721-kube-api-access-7lb6z\") pod \"csi-hostpathplugin-qrhqs\" (UID: \"83a0c203-a9b8-462a-ba20-536641aa0721\") " pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.203849 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.217216 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdk6h\" (UniqueName: \"kubernetes.io/projected/4beb05d8-6d1a-488a-8ce4-e35d73b04e38-kube-api-access-mdk6h\") pod \"olm-operator-6b444d44fb-r2npz\" (UID: \"4beb05d8-6d1a-488a-8ce4-e35d73b04e38\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" Feb 23 09:09:42 crc kubenswrapper[4834]: W0223 09:09:42.237648 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5608eb40_e8c9_4701_85ee_68b1cbd4b79c.slice/crio-01d345ab94daf5fccf1a35d6c6ef745ba76626ac7f99ec3fc677bb2754f78228 WatchSource:0}: Error finding container 01d345ab94daf5fccf1a35d6c6ef745ba76626ac7f99ec3fc677bb2754f78228: Status 404 returned error can't find the container with id 01d345ab94daf5fccf1a35d6c6ef745ba76626ac7f99ec3fc677bb2754f78228 Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.239699 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcjz9\" (UniqueName: \"kubernetes.io/projected/2e300c80-7530-4d72-acd8-4bfdca03c327-kube-api-access-mcjz9\") pod \"dns-default-c9zqb\" (UID: \"2e300c80-7530-4d72-acd8-4bfdca03c327\") " pod="openshift-dns/dns-default-c9zqb" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.271711 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zplh8\" (UniqueName: \"kubernetes.io/projected/a2db3816-ea50-4e5b-ab5a-a4d306950672-kube-api-access-zplh8\") pod \"machine-config-server-cd8np\" (UID: \"a2db3816-ea50-4e5b-ab5a-a4d306950672\") " pod="openshift-machine-config-operator/machine-config-server-cd8np" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.272167 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:42 crc kubenswrapper[4834]: E0223 09:09:42.272642 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.772625608 +0000 UTC m=+118.850939995 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.283827 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.291288 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.295117 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" event={"ID":"36d9e9ae-0e67-441b-bbdb-a5292cba2360","Type":"ContainerStarted","Data":"b25bdc1ee6c0b1bbdfb1cbdb6c665deebdbdcbbc1baacf5d4ff5caaabe53455b"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.295191 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" event={"ID":"36d9e9ae-0e67-441b-bbdb-a5292cba2360","Type":"ContainerStarted","Data":"b2203cb194425d0085aabaf828b5524909c5905463e2d4ee816912b75a18d790"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.295206 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" event={"ID":"36d9e9ae-0e67-441b-bbdb-a5292cba2360","Type":"ContainerStarted","Data":"470e35eaca1681ef16aa1b9bc99144b82e7780d011302d50a903b0f155ec8cd7"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.299165 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-hwvz9" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.310445 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tjx4v\" (UniqueName: \"kubernetes.io/projected/0829b4ef-0b9e-49c6-ba38-daafdd8332ce-kube-api-access-tjx4v\") pod \"machine-config-controller-84d6567774-6rmcx\" (UID: \"0829b4ef-0b9e-49c6-ba38-daafdd8332ce\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.311694 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5kdz\" (UniqueName: \"kubernetes.io/projected/59fb1571-215c-49da-a6ae-3c2152ef19f6-kube-api-access-q5kdz\") pod \"collect-profiles-29530620-fvlp9\" (UID: \"59fb1571-215c-49da-a6ae-3c2152ef19f6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.317119 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.325797 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" event={"ID":"4ae41025-47a2-4192-8650-e3bec3a0a8f7","Type":"ContainerStarted","Data":"5082f6171d5745ed630f2af072a271676441706c29bf5ebb02a62482e22e5e6a"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.325851 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" event={"ID":"4ae41025-47a2-4192-8650-e3bec3a0a8f7","Type":"ContainerStarted","Data":"ad63341306d169c5b36c28e65857f3f44a15818955c4ce5e4979e80531ae113c"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.326287 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79"] Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.327291 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkmzp\" (UniqueName: \"kubernetes.io/projected/42d36f07-e27c-49e0-bad8-472e93d3875d-kube-api-access-lkmzp\") pod \"multus-admission-controller-857f4d67dd-m8kxw\" (UID: \"42d36f07-e27c-49e0-bad8-472e93d3875d\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-m8kxw" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.337071 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4thfs\" (UniqueName: \"kubernetes.io/projected/2c9de897-17f3-4444-ad95-b5e07b40f6c8-kube-api-access-4thfs\") pod \"marketplace-operator-79b997595-hlc9q\" (UID: \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\") " pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.346679 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.353670 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-bdng5" event={"ID":"94bf75a0-18d7-4f14-8b4b-a9050ab2eab7","Type":"ContainerStarted","Data":"e89ba615329312004113c91323e273c649d124ce15368c031a405233524985ee"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.355835 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx" event={"ID":"4ab9ada9-cbae-4592-b699-e3bf33c09a95","Type":"ContainerStarted","Data":"a28518f8402e653218cf7f23ef56c8bc121f53006cd72dea0df0b578c597c670"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.355886 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx" event={"ID":"4ab9ada9-cbae-4592-b699-e3bf33c09a95","Type":"ContainerStarted","Data":"d17c4dfb72842923c3728232915339a731636d17c2386bbd5dd9e7ec9c60fdf4"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.356788 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.364773 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-mxsss" event={"ID":"fbca2b49-c933-408c-9c80-fb1202bfb6f1","Type":"ContainerStarted","Data":"e8c81862bb6adc5492722eab2421453a845f16425516261ab7c6a92c31035680"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.364827 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-mxsss" event={"ID":"fbca2b49-c933-408c-9c80-fb1202bfb6f1","Type":"ContainerStarted","Data":"bbc0664f2c28c6957ddbb7600f66fe0bc591f13fd64d49f2b3f07e39df868539"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.371107 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-mxsss" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.373231 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:42 crc kubenswrapper[4834]: E0223 09:09:42.373488 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.873458502 +0000 UTC m=+118.951772879 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.373831 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" event={"ID":"c628e572-63b9-478b-bf3a-6ff1966480a1","Type":"ContainerStarted","Data":"196a2a8abb6ff0a4f63033b3c41b8123d65b9b382205cbd5519e5687f9403ef3"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.374435 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.375081 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vxrl5\" (UniqueName: \"kubernetes.io/projected/a00a2272-643a-4144-aa4c-ff2d40639e8c-kube-api-access-vxrl5\") pod \"catalog-operator-68c6474976-9skpv\" (UID: \"a00a2272-643a-4144-aa4c-ff2d40639e8c\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.375405 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-n6kdb" event={"ID":"6e72626f-db77-4945-8fb1-48c1d7507251","Type":"ContainerStarted","Data":"223153aa426884fc43e35a6d65d6fcb1c33ec188c3ccab1580b754a02a36b5ec"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 
09:09:42.376177 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.376388 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-cd8np" Feb 23 09:09:42 crc kubenswrapper[4834]: E0223 09:09:42.376933 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.876917989 +0000 UTC m=+118.955232376 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.377629 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2wpmg" event={"ID":"5608eb40-e8c9-4701-85ee-68b1cbd4b79c","Type":"ContainerStarted","Data":"01d345ab94daf5fccf1a35d6c6ef745ba76626ac7f99ec3fc677bb2754f78228"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.385933 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-c9zqb" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.387104 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-rf5l7" event={"ID":"f489a96f-1839-4986-9340-e9b9d8960435","Type":"ContainerStarted","Data":"04478eec9159902922dc77453bb4a2de681f7a1c399059ee6a8daa80aaa4ef10"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.387147 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-rf5l7" event={"ID":"f489a96f-1839-4986-9340-e9b9d8960435","Type":"ContainerStarted","Data":"b67e1e068f7ee12cfcb72f31e2bfdb4b6c15364fc90babf1f26b709b546b73bc"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.390057 4834 generic.go:334] "Generic (PLEG): container finished" podID="7df8904d-d89c-41c3-b207-795a41e7cd3f" containerID="16f14afcd77540a74b9b27bfc232ab22b96f653c658bd0c1a176c6b99457de17" exitCode=0 Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.390259 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" event={"ID":"7df8904d-d89c-41c3-b207-795a41e7cd3f","Type":"ContainerDied","Data":"16f14afcd77540a74b9b27bfc232ab22b96f653c658bd0c1a176c6b99457de17"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.393808 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-ljtgg" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.397075 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" event={"ID":"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd","Type":"ContainerStarted","Data":"8369d4e8ebce379a776a0eaa126786ab666b5c9fd5576ea4bb678328bff9c1e2"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.397985 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.400535 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nsbmq\" (UniqueName: \"kubernetes.io/projected/617a1fea-21a9-4cfd-81df-d0f3ebd52652-kube-api-access-nsbmq\") pod \"packageserver-d55dfcdfc-fg4xb\" (UID: \"617a1fea-21a9-4cfd-81df-d0f3ebd52652\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.400881 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" event={"ID":"0f47419a-2e53-440b-854b-9fd226fb17d2","Type":"ContainerStarted","Data":"157e4c728e0b42003ad826d173058ed313ccd98516405879514d1ea7be3fc03f"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.400913 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" event={"ID":"0f47419a-2e53-440b-854b-9fd226fb17d2","Type":"ContainerStarted","Data":"dbd42f7c04b534ceb640982aa9d06bd37bed739122b5d464d7474c5154aa33ff"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.405676 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" event={"ID":"22033762-41c1-4e84-88d5-59187abf701f","Type":"ContainerStarted","Data":"f9c28b3fe21042c022592b50751d90cf041d608edc5a6d160077e4d5e62fb208"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.406901 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k9lbb"] Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.409407 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btxmv\" (UniqueName: \"kubernetes.io/projected/441917a1-296e-4529-a79f-458faf4769e6-kube-api-access-btxmv\") pod \"cni-sysctl-allowlist-ds-rlmdc\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.411497 4834 generic.go:334] "Generic (PLEG): container finished" podID="782436e6-7d7f-4e44-afe6-542014b15e86" containerID="d8fe1df8fdcce6b220763e1fa88fc95c2c108131ee364d1a8d590a7f10dcced8" exitCode=0 Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.411588 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-897nm" event={"ID":"782436e6-7d7f-4e44-afe6-542014b15e86","Type":"ContainerDied","Data":"d8fe1df8fdcce6b220763e1fa88fc95c2c108131ee364d1a8d590a7f10dcced8"} Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.413005 4834 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-8mk2m container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: 
connect: connection refused" start-of-body= Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.413065 4834 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" podUID="dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": dial tcp 10.217.0.24:8443: connect: connection refused" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.442384 4834 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-xslpt container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body= Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.442999 4834 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" podUID="c628e572-63b9-478b-bf3a-6ff1966480a1" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.442777 4834 patch_prober.go:28] interesting pod/downloads-7954f5f757-mxsss container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.443567 4834 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mxsss" podUID="fbca2b49-c933-408c-9c80-fb1202bfb6f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.488350 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:42 crc kubenswrapper[4834]: E0223 09:09:42.488875 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.988851141 +0000 UTC m=+119.067165528 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.489481 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:42 crc kubenswrapper[4834]: E0223 09:09:42.499018 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:42.998992941 +0000 UTC m=+119.077307328 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.542329 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.543191 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.557011 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-m8kxw" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.567527 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.575921 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.592142 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:42 crc kubenswrapper[4834]: E0223 09:09:42.592278 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:43.092251095 +0000 UTC m=+119.170565482 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.592668 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:42 crc kubenswrapper[4834]: E0223 09:09:42.602940 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:43.10288229 +0000 UTC m=+119.181196677 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.608735 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.621028 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.621115 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.625822 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.672096 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.694170 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:42 crc kubenswrapper[4834]: E0223 09:09:42.694622 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:43.194580541 +0000 UTC m=+119.272894928 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.795798 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:42 crc kubenswrapper[4834]: E0223 09:09:42.803539 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:43.30351515 +0000 UTC m=+119.381829537 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.861970 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2"] Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.900225 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:42 crc kubenswrapper[4834]: E0223 09:09:42.900345 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-02-23 09:09:43.400321532 +0000 UTC m=+119.478635919 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.900628 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.900658 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-mxsss" podStartSLOduration=51.900635231 podStartE2EDuration="51.900635231s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:42.899698585 +0000 UTC m=+118.978012982" watchObservedRunningTime="2026-02-23 09:09:42.900635231 +0000 UTC m=+118.978949628" Feb 23 09:09:42 crc kubenswrapper[4834]: E0223 09:09:42.901044 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:43.401036582 +0000 UTC m=+119.479350969 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:42 crc kubenswrapper[4834]: I0223 09:09:42.903711 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp"] Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.002955 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:43 crc kubenswrapper[4834]: E0223 09:09:43.004728 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:43.504702385 +0000 UTC m=+119.583016762 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.084613 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" podStartSLOduration=51.084591638 podStartE2EDuration="51.084591638s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:43.081422581 +0000 UTC m=+119.159736968" watchObservedRunningTime="2026-02-23 09:09:43.084591638 +0000 UTC m=+119.162906025" Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.105878 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:43 crc kubenswrapper[4834]: E0223 09:09:43.106289 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:43.60627498 +0000 UTC m=+119.684589367 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.129626 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.207779 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:43 crc kubenswrapper[4834]: E0223 09:09:43.208202 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:43.708175263 +0000 UTC m=+119.786489650 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.310354 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:43 crc kubenswrapper[4834]: E0223 09:09:43.310825 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:43.810808847 +0000 UTC m=+119.889123234 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.412663 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:43 crc kubenswrapper[4834]: E0223 09:09:43.413360 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:43.913342499 +0000 UTC m=+119.991656886 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.422349 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k9lbb" event={"ID":"c76b76a1-92df-4e16-b72b-ae9f3d952c72","Type":"ContainerStarted","Data":"d939ba5069a63adbd9ff6062a557684cc6c2d13161c7965524e1bdc326bcae5a"} Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.423865 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-bdng5" event={"ID":"94bf75a0-18d7-4f14-8b4b-a9050ab2eab7","Type":"ContainerStarted","Data":"5ffe2ecb204bf7ec932b55c1f421c263e75ad2556e46ab1bb5d76a8b63317714"} Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.425135 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.434093 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" event={"ID":"441917a1-296e-4529-a79f-458faf4769e6","Type":"ContainerStarted","Data":"6ead88ab6c73ff9c9ba14cdc18ad59de3aa8e95987d5371642be9808ca0c77d9"} Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.435611 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-n6kdb" event={"ID":"6e72626f-db77-4945-8fb1-48c1d7507251","Type":"ContainerStarted","Data":"7a0ef899f2e80020f1acb969b23a5b3198fe2172beb034b676ca3a08ea4503b8"} Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.436420 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-cd8np" event={"ID":"a2db3816-ea50-4e5b-ab5a-a4d306950672","Type":"ContainerStarted","Data":"8d8088f9c16711ad91c09e6b41a2b4519d6bb1038d64cfc81f5ec0876019aaef"} Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.437232 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2wpmg" event={"ID":"5608eb40-e8c9-4701-85ee-68b1cbd4b79c","Type":"ContainerStarted","Data":"9aa7d9f64075513aaba16f3384722751b1c0ecf274b72a2c1ca2198633620987"} Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.461908 4834 patch_prober.go:28] interesting pod/console-operator-58897d9998-bdng5 container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/readyz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.461967 4834 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-bdng5" podUID="94bf75a0-18d7-4f14-8b4b-a9050ab2eab7" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/readyz\": dial tcp 10.217.0.25:8443: connect: connection refused" Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.489865 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" event={"ID":"4ae41025-47a2-4192-8650-e3bec3a0a8f7","Type":"ContainerStarted","Data":"32ea58c5d10d3855c1b67d1d37f343a81479f4c4a1cf61aeecefd8802fd5ce09"} Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.512169 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79" event={"ID":"383bf705-7d54-4400-9739-2b7c48b20ef8","Type":"ContainerStarted","Data":"afc8326364764d24e5bd23b7a7dfc348b9de5f42107a8950493543687473e4f4"} Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.515555 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:43 crc kubenswrapper[4834]: E0223 09:09:43.517185 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:44.017172366 +0000 UTC m=+120.095486753 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.573220 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" event={"ID":"ad9b3cbe-a60c-43af-92e7-fb757f59162b","Type":"ContainerStarted","Data":"cf5cb8d1714b188cbe86c48cfb86f35520a5040d0a537c3c332c07048eee3e93"} Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.577599 4834 patch_prober.go:28] interesting pod/downloads-7954f5f757-mxsss container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.577666 4834 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mxsss" podUID="fbca2b49-c933-408c-9c80-fb1202bfb6f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.606480 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.611959 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4"] Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.617269 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:43 crc kubenswrapper[4834]: E0223 09:09:43.624520 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:44.124469609 +0000 UTC m=+120.202783996 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.639699 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p"] Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.641866 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-xzc7c"] Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.642200 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-v7mzh" podStartSLOduration=52.64218962 podStartE2EDuration="52.64218962s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:43.622934176 +0000 UTC m=+119.701248563" watchObservedRunningTime="2026-02-23 09:09:43.64218962 +0000 UTC m=+119.720504007" Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.719253 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:43 crc kubenswrapper[4834]: E0223 09:09:43.719859 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:44.219832591 +0000 UTC m=+120.298146978 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.778953 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" podStartSLOduration=52.778929189 podStartE2EDuration="52.778929189s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:43.775987097 +0000 UTC m=+119.854301494" watchObservedRunningTime="2026-02-23 09:09:43.778929189 +0000 UTC m=+119.857243576" Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.804948 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-2zrzx" podStartSLOduration=52.804929909 podStartE2EDuration="52.804929909s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:43.804786305 +0000 UTC m=+119.883100702" watchObservedRunningTime="2026-02-23 09:09:43.804929909 +0000 UTC m=+119.883244296" Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.821401 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:43 crc kubenswrapper[4834]: E0223 09:09:43.821878 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:44.321857778 +0000 UTC m=+120.400172165 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.865135 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:43 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:43 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:43 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.865192 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:43 crc kubenswrapper[4834]: I0223 09:09:43.923578 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:43 crc kubenswrapper[4834]: E0223 09:09:43.924002 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:44.423984529 +0000 UTC m=+120.502298916 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.025176 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:44 crc kubenswrapper[4834]: E0223 09:09:44.025367 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:44.525326517 +0000 UTC m=+120.603640904 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.025784 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:44 crc kubenswrapper[4834]: E0223 09:09:44.026171 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:44.526157499 +0000 UTC m=+120.604471876 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.035250 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-qt6tc" podStartSLOduration=53.035228751 podStartE2EDuration="53.035228751s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:44.034740867 +0000 UTC m=+120.113055264" watchObservedRunningTime="2026-02-23 09:09:44.035228751 +0000 UTC m=+120.113543138" Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.043252 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" podStartSLOduration=53.043228092 podStartE2EDuration="53.043228092s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:44.00379419 +0000 UTC m=+120.082108587" watchObservedRunningTime="2026-02-23 09:09:44.043228092 +0000 UTC m=+120.121542489" Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.090365 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-g9jpx" podStartSLOduration=53.090336518 podStartE2EDuration="53.090336518s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:44.073516072 +0000 UTC m=+120.151830449" watchObservedRunningTime="2026-02-23 09:09:44.090336518 +0000 UTC m=+120.168650905" Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.131104 
4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:44 crc kubenswrapper[4834]: E0223 09:09:44.131351 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:44.631320144 +0000 UTC m=+120.709634531 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.131541 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:44 crc kubenswrapper[4834]: E0223 09:09:44.131841 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:44.631834588 +0000 UTC m=+120.710148975 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.233471 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:44 crc kubenswrapper[4834]: E0223 09:09:44.233774 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:44.733742541 +0000 UTC m=+120.812056928 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.233941 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:44 crc kubenswrapper[4834]: E0223 09:09:44.234292 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:44.734281807 +0000 UTC m=+120.812596194 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.273133 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-bjngd" podStartSLOduration=52.273113002 podStartE2EDuration="52.273113002s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:44.268602308 +0000 UTC m=+120.346916695" watchObservedRunningTime="2026-02-23 09:09:44.273113002 +0000 UTC m=+120.351427389" Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.295273 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-rf5l7" podStartSLOduration=53.295255296 podStartE2EDuration="53.295255296s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:44.291734539 +0000 UTC m=+120.370048926" watchObservedRunningTime="2026-02-23 09:09:44.295255296 +0000 UTC m=+120.373569683" Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.349853 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:44 crc kubenswrapper[4834]: E0223 09:09:44.350849 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b 
nodeName:}" failed. No retries permitted until 2026-02-23 09:09:44.850816936 +0000 UTC m=+120.929131323 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.435662 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7"] Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.444566 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-f7vd2" podStartSLOduration=53.444537053 podStartE2EDuration="53.444537053s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:44.428326744 +0000 UTC m=+120.506641131" watchObservedRunningTime="2026-02-23 09:09:44.444537053 +0000 UTC m=+120.522851440" Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.454513 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:44 crc kubenswrapper[4834]: E0223 09:09:44.458486 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:44.958466819 +0000 UTC m=+121.036781216 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.491001 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-bdng5" podStartSLOduration=53.49098385 podStartE2EDuration="53.49098385s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:44.488010647 +0000 UTC m=+120.566325034" watchObservedRunningTime="2026-02-23 09:09:44.49098385 +0000 UTC m=+120.569298227" Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.549878 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:44 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:44 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:44 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.550283 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.565138 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:44 crc kubenswrapper[4834]: E0223 09:09:44.565558 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:45.065537546 +0000 UTC m=+121.143851933 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.680382 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p" event={"ID":"3998427e-dcd1-4c1e-ba32-cfacf3c9fa44","Type":"ContainerStarted","Data":"766822181d57586eab9128b59d45660b92d9ae01894f63b683341162529b4ad7"} Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.680471 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" event={"ID":"1ef3d4b6-98be-4c13-99ee-6787dff39425","Type":"ContainerStarted","Data":"6e80821250ecb675e411e616644c1455f8f13ab3e95a7027f3b6e82ca3e88515"} Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.680932 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-kz6hj" podStartSLOduration=53.680921623 podStartE2EDuration="53.680921623s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:44.516219599 +0000 UTC m=+120.594533986" watchObservedRunningTime="2026-02-23 09:09:44.680921623 +0000 UTC m=+120.759236010" Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.681621 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:44 crc kubenswrapper[4834]: E0223 09:09:44.681937 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:45.181925311 +0000 UTC m=+121.260239698 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.711049 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2" event={"ID":"59d59459-2ab3-40d8-9cc4-f68e34377748","Type":"ContainerStarted","Data":"2a6761c4b305be0a5ebc32b6cfa3f249766afae51552a731b214937df87910e9"} Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.786252 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:44 crc kubenswrapper[4834]: E0223 09:09:44.786899 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:45.286876269 +0000 UTC m=+121.365190656 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.821177 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" event={"ID":"7df8904d-d89c-41c3-b207-795a41e7cd3f","Type":"ContainerStarted","Data":"658994df39dadb4d4c75d5a905d934d5c75fca5193e13b3f20a01bc72c0c1a14"} Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.832995 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7" event={"ID":"bf3d38f3-bb40-4318-bd61-19df5d4e3572","Type":"ContainerStarted","Data":"fc285742569c00df12cbb09db542504332a0923f5b64157f8e867f9dcfca3845"} Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.843115 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d"] Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.888920 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:44 crc kubenswrapper[4834]: E0223 09:09:44.895328 4834 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:45.395277934 +0000 UTC m=+121.473592321 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.906621 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-hwvz9"] Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.910705 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" event={"ID":"3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d","Type":"ContainerStarted","Data":"3381b3b7c44f560a72d228e9d7690ef0edf9eafd83b34078e74a3f9878ed955c"} Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.915531 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g"] Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.922810 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz"] Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.924463 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h"] Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.968525 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-897nm" event={"ID":"782436e6-7d7f-4e44-afe6-542014b15e86","Type":"ContainerStarted","Data":"2c786d59edc0408674e8c53251a5bf8b374f28fb80a4bb44e938038fa570d83f"} Feb 23 09:09:44 crc kubenswrapper[4834]: I0223 09:09:44.990035 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:44 crc kubenswrapper[4834]: E0223 09:09:44.991390 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:45.491368686 +0000 UTC m=+121.569683073 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.025365 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb"] Feb 23 09:09:45 crc kubenswrapper[4834]: W0223 09:09:45.031559 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2ba759e0_a281_467d_aa5a_7ac7d97c67fc.slice/crio-6dd7f5c3a9c98edfe0ad10387388b2dbf448919d5206ad67eaac493775ab4236 WatchSource:0}: Error finding container 6dd7f5c3a9c98edfe0ad10387388b2dbf448919d5206ad67eaac493775ab4236: Status 404 returned error can't find the container with id 6dd7f5c3a9c98edfe0ad10387388b2dbf448919d5206ad67eaac493775ab4236 Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.079041 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" podStartSLOduration=53.079020415 podStartE2EDuration="53.079020415s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:45.078713366 +0000 UTC m=+121.157027763" watchObservedRunningTime="2026-02-23 09:09:45.079020415 +0000 UTC m=+121.157334802" Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.082224 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-bdng5" Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.088037 4834 csr.go:261] certificate signing request csr-k85gr is approved, waiting to be issued Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.091956 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:45 crc kubenswrapper[4834]: E0223 09:09:45.097089 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:45.597074846 +0000 UTC m=+121.675389233 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.102336 4834 csr.go:257] certificate signing request csr-k85gr is issued Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.148714 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-qrhqs"] Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.193097 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:45 crc kubenswrapper[4834]: E0223 09:09:45.193488 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:45.693466466 +0000 UTC m=+121.771780853 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.227073 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-c9zqb"] Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.237293 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj"] Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.276497 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-m8kxw"] Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.298844 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hlc9q"] Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.299713 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:45 crc kubenswrapper[4834]: E0223 09:09:45.300155 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:45.800140322 +0000 UTC m=+121.878454709 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.308063 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9"] Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.333471 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv"] Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.400799 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:45 crc kubenswrapper[4834]: E0223 09:09:45.401446 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:45.901402268 +0000 UTC m=+121.979716655 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.407166 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-ljtgg"] Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.435119 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx"] Feb 23 09:09:45 crc kubenswrapper[4834]: W0223 09:09:45.453997 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod59fb1571_215c_49da_a6ae_3c2152ef19f6.slice/crio-dd43f72b5c79682ab59e65203268340cae860417b1a42a1fa7190a5030324c1a WatchSource:0}: Error finding container dd43f72b5c79682ab59e65203268340cae860417b1a42a1fa7190a5030324c1a: Status 404 returned error can't find the container with id dd43f72b5c79682ab59e65203268340cae860417b1a42a1fa7190a5030324c1a Feb 23 09:09:45 crc kubenswrapper[4834]: W0223 09:09:45.485584 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda00a2272_643a_4144_aa4c_ff2d40639e8c.slice/crio-5f78f8a2a5507810f17f35e2434dcad090978273dc911deac3b998c44ddaa645 WatchSource:0}: Error finding container 5f78f8a2a5507810f17f35e2434dcad090978273dc911deac3b998c44ddaa645: Status 404 returned error can't find the container with id 5f78f8a2a5507810f17f35e2434dcad090978273dc911deac3b998c44ddaa645 Feb 23 09:09:45 
crc kubenswrapper[4834]: I0223 09:09:45.509247 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:45 crc kubenswrapper[4834]: E0223 09:09:45.509736 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:46.00972017 +0000 UTC m=+122.088034557 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.547461 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:45 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:45 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:45 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.547942 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:45 crc kubenswrapper[4834]: W0223 09:09:45.576664 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6b454f53_4bc6_4e49_a80d_2f31fe1dccb9.slice/crio-156635dad12d847f007d8a4429249db91456d106f2cac42f5a837b3a5a38f489 WatchSource:0}: Error finding container 156635dad12d847f007d8a4429249db91456d106f2cac42f5a837b3a5a38f489: Status 404 returned error can't find the container with id 156635dad12d847f007d8a4429249db91456d106f2cac42f5a837b3a5a38f489 Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.610830 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:45 crc kubenswrapper[4834]: E0223 09:09:45.611175 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:46.11115634 +0000 UTC m=+122.189470727 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.715042 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:45 crc kubenswrapper[4834]: E0223 09:09:45.715495 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:46.215479161 +0000 UTC m=+122.293793548 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.719599 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-8mk2m"] Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.719951 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" podUID="dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39" containerName="controller-manager" containerID="cri-o://1afc1551d544cf2a7d08d6356b6ea290d5abce35e1f693735cfc3c93ecafc0db" gracePeriod=30 Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.744175 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.772242 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt"] Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.819530 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:45 crc kubenswrapper[4834]: E0223 09:09:45.820368 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:46.320335567 +0000 UTC m=+122.398649944 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.921096 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:45 crc kubenswrapper[4834]: E0223 09:09:45.921624 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:46.421607843 +0000 UTC m=+122.499922230 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.924702 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:45 crc kubenswrapper[4834]: I0223 09:09:45.924869 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.030233 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:46 crc kubenswrapper[4834]: E0223 09:09:46.030696 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:46.530673595 +0000 UTC m=+122.608987982 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.055400 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k9lbb" event={"ID":"c76b76a1-92df-4e16-b72b-ae9f3d952c72","Type":"ContainerStarted","Data":"31cddc17bcd02d4012f16b40a8071dcd3b99ef73a1aa63c0daf79f9c5b7692f4"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.061865 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d" event={"ID":"9707583d-7687-4854-8501-eacac0abff04","Type":"ContainerStarted","Data":"4b6b3b2becf62576f3fbe908b7c0e715e2649faaaa2d79a9f8666c8991d48439"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.061919 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d" event={"ID":"9707583d-7687-4854-8501-eacac0abff04","Type":"ContainerStarted","Data":"e52801dde35d32f66348a15d27e1978e61865776d5cd3d462bdf8776e437e9e4"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.068553 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2" event={"ID":"59d59459-2ab3-40d8-9cc4-f68e34377748","Type":"ContainerStarted","Data":"2441ac5ecdabe1e08b9a409c51eefc35b9ac21b4a0a2292579da7487a801d092"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.094745 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" event={"ID":"2c9de897-17f3-4444-ad95-b5e07b40f6c8","Type":"ContainerStarted","Data":"45f772b5eea9ac90ace0bdddc67f74973cc3eaea9b4048fd2e77f90014365c55"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.096347 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-k9lbb" podStartSLOduration=54.096326854 podStartE2EDuration="54.096326854s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.09401598 +0000 UTC m=+122.172330367" watchObservedRunningTime="2026-02-23 09:09:46.096326854 +0000 UTC m=+122.174641231" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.097617 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" event={"ID":"0829b4ef-0b9e-49c6-ba38-daafdd8332ce","Type":"ContainerStarted","Data":"45e65fc380610b7f49543439665c710f3974dbe42da08c2456bd569acd661bd4"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.104535 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-02-23 09:04:45 +0000 UTC, rotation deadline is 2026-12-03 22:08:29.914363241 +0000 UTC Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.104598 4834 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 6804h58m43.809768418s 
for next certificate rotation Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.108663 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79" event={"ID":"383bf705-7d54-4400-9739-2b7c48b20ef8","Type":"ContainerStarted","Data":"0f9b857b2546a60158e2fdf161ffd3bdab1ad8afcf5d6e32c731d8054a44b3ea"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.131524 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:46 crc kubenswrapper[4834]: E0223 09:09:46.131850 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:46.631837069 +0000 UTC m=+122.710151456 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.136918 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" event={"ID":"83a0c203-a9b8-462a-ba20-536641aa0721","Type":"ContainerStarted","Data":"ad72f4efd209b37798daea811e0f2fc74c7c63888f4d199d72dbc189630608e4"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.152906 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-75r79" podStartSLOduration=55.152887201 podStartE2EDuration="55.152887201s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.151465563 +0000 UTC m=+122.229779950" watchObservedRunningTime="2026-02-23 09:09:46.152887201 +0000 UTC m=+122.231201588" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.170267 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-hwvz9" event={"ID":"21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc","Type":"ContainerStarted","Data":"8934ec70175125d488615df15c3f71f3b7fc4ff2942e36df5b91f0f7b3ce1885"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.170323 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-hwvz9" event={"ID":"21b99c3b-2c7f-49ed-9c9c-a4bf0d2318dc","Type":"ContainerStarted","Data":"655694f274a6c0106fe21bc7a2c68f26d377cc1b8d811d7dc624df291c4dab34"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.194700 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-c9zqb" 
event={"ID":"2e300c80-7530-4d72-acd8-4bfdca03c327","Type":"ContainerStarted","Data":"ed192364013ab379272a50bc2812643c55d9f6efdb132d49633937f0696e166d"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.208642 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-hwvz9" podStartSLOduration=54.208624646 podStartE2EDuration="54.208624646s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.206849687 +0000 UTC m=+122.285164074" watchObservedRunningTime="2026-02-23 09:09:46.208624646 +0000 UTC m=+122.286939033" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.233279 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:46 crc kubenswrapper[4834]: E0223 09:09:46.234830 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:46.734794982 +0000 UTC m=+122.813109449 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.240748 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" event={"ID":"59fb1571-215c-49da-a6ae-3c2152ef19f6","Type":"ContainerStarted","Data":"dd43f72b5c79682ab59e65203268340cae860417b1a42a1fa7190a5030324c1a"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.263216 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" podStartSLOduration=55.263194109 podStartE2EDuration="55.263194109s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.263142967 +0000 UTC m=+122.341457354" watchObservedRunningTime="2026-02-23 09:09:46.263194109 +0000 UTC m=+122.341508496" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.277901 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p" event={"ID":"3998427e-dcd1-4c1e-ba32-cfacf3c9fa44","Type":"ContainerStarted","Data":"409c64797928ae0d10af99584cde044a4c7cb6f84a7f6379622b9dbd3c57b3e1"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.312444 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.321775 4834 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-n6kdb" event={"ID":"6e72626f-db77-4945-8fb1-48c1d7507251","Type":"ContainerStarted","Data":"f25d4290136d2e6797b9f375469289104d6c3f5427c268fdf519176014943171"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.338404 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:46 crc kubenswrapper[4834]: E0223 09:09:46.339962 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:46.839945295 +0000 UTC m=+122.918259772 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.371728 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-cd8np" event={"ID":"a2db3816-ea50-4e5b-ab5a-a4d306950672","Type":"ContainerStarted","Data":"f64580ff1ab5eeb7a07c900a101754757a6d0c9007f81b22d1127d6425bfa3db"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.385445 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7" event={"ID":"bf3d38f3-bb40-4318-bd61-19df5d4e3572","Type":"ContainerStarted","Data":"026d7b101a07fceddf1ece6b29307578f7502532494fefdd4c36f4d9dac604db"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.397746 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-crc7p" podStartSLOduration=55.397728737 podStartE2EDuration="55.397728737s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.324830407 +0000 UTC m=+122.403144794" watchObservedRunningTime="2026-02-23 09:09:46.397728737 +0000 UTC m=+122.476043124" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.402024 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-ljtgg" event={"ID":"6b454f53-4bc6-4e49-a80d-2f31fe1dccb9","Type":"ContainerStarted","Data":"156635dad12d847f007d8a4429249db91456d106f2cac42f5a837b3a5a38f489"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.418732 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" event={"ID":"a00a2272-643a-4144-aa4c-ff2d40639e8c","Type":"ContainerStarted","Data":"5f78f8a2a5507810f17f35e2434dcad090978273dc911deac3b998c44ddaa645"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 
09:09:46.419173 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.446131 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:46 crc kubenswrapper[4834]: E0223 09:09:46.446748 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:46.946708963 +0000 UTC m=+123.025023350 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.446847 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs\") pod \"network-metrics-daemon-nzcfx\" (UID: \"4ceb8401-3a07-422f-ae4d-14366611f4a6\") " pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.446954 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:46 crc kubenswrapper[4834]: E0223 09:09:46.448036 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:46.9480258 +0000 UTC m=+123.026340177 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.455864 4834 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-9skpv container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.455932 4834 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" podUID="a00a2272-643a-4144-aa4c-ff2d40639e8c" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.463782 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" event={"ID":"8fba4843-d9f5-4156-8d0b-d0e764be8942","Type":"ContainerStarted","Data":"4bae195c6c48f686a9b772acec397875c75e8771448d058c82cb1e174a5e3d2f"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.469333 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.470161 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" event={"ID":"4beb05d8-6d1a-488a-8ce4-e35d73b04e38","Type":"ContainerStarted","Data":"8ac1d80a7b5900447a2853b5faa54f59d6412ad55bbc9f571348c2565de1aefd"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.470195 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" event={"ID":"4beb05d8-6d1a-488a-8ce4-e35d73b04e38","Type":"ContainerStarted","Data":"b7e3d586a8679d185b73b7b66152e72abebdfb9ac41e1973d4a324c88fc3b6cf"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.473115 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" event={"ID":"1ef3d4b6-98be-4c13-99ee-6787dff39425","Type":"ContainerStarted","Data":"03ca47e9cf966ef1b345b3e5dfd9d24617e94fa4502baaea115127b04318205e"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.475796 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h" event={"ID":"2ba759e0-a281-467d-aa5a-7ac7d97c67fc","Type":"ContainerStarted","Data":"6dd7f5c3a9c98edfe0ad10387388b2dbf448919d5206ad67eaac493775ab4236"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.478586 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-m8kxw" event={"ID":"42d36f07-e27c-49e0-bad8-472e93d3875d","Type":"ContainerStarted","Data":"81819249e7c9e55c3787be1827693a26c1b48b8050feb240f04b9fd8d81032ff"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.480721 4834 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" event={"ID":"3cf5cb6f-3f38-48ac-b97d-ab66016b8f5d","Type":"ContainerStarted","Data":"15a8247d3c415e0ad1d0da58a1315936eddd13735cf073ec5e12146e24f5cbc2"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.483601 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" event={"ID":"617a1fea-21a9-4cfd-81df-d0f3ebd52652","Type":"ContainerStarted","Data":"79d21a2a506d961a8f8b87c158a107e46d2f877ddc8097a1ba9045e62b0a9a32"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.483633 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" event={"ID":"617a1fea-21a9-4cfd-81df-d0f3ebd52652","Type":"ContainerStarted","Data":"1e920be387927c7dba19cfdb74fd8f558f2baf3b52c21071c3633c347964bb07"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.484540 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.504995 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-cwqg7" podStartSLOduration=55.504958408 podStartE2EDuration="55.504958408s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.502951322 +0000 UTC m=+122.581265709" watchObservedRunningTime="2026-02-23 09:09:46.504958408 +0000 UTC m=+122.583272795" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.505322 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-n6kdb" podStartSLOduration=55.505314927 podStartE2EDuration="55.505314927s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.418994055 +0000 UTC m=+122.497308442" watchObservedRunningTime="2026-02-23 09:09:46.505314927 +0000 UTC m=+122.583629314" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.513656 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/4ceb8401-3a07-422f-ae4d-14366611f4a6-metrics-certs\") pod \"network-metrics-daemon-nzcfx\" (UID: \"4ceb8401-3a07-422f-ae4d-14366611f4a6\") " pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.533984 4834 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-fg4xb container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.40:5443/healthz\": dial tcp 10.217.0.40:5443: connect: connection refused" start-of-body= Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.534392 4834 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" podUID="617a1fea-21a9-4cfd-81df-d0f3ebd52652" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.40:5443/healthz\": dial tcp 10.217.0.40:5443: connect: connection refused" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 
09:09:46.543259 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" podStartSLOduration=54.543238049 podStartE2EDuration="54.543238049s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.541990114 +0000 UTC m=+122.620304501" watchObservedRunningTime="2026-02-23 09:09:46.543238049 +0000 UTC m=+122.621552436" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.544761 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:46 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:46 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:46 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.544840 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.547279 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-897nm" event={"ID":"782436e6-7d7f-4e44-afe6-542014b15e86","Type":"ContainerStarted","Data":"3f0f904a7105e921ed754d616f893629fc58931e5bfc93159ebdc18f07b9205e"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.548833 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:46 crc kubenswrapper[4834]: E0223 09:09:46.550136 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:47.050103219 +0000 UTC m=+123.128417636 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.552573 4834 generic.go:334] "Generic (PLEG): container finished" podID="dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39" containerID="1afc1551d544cf2a7d08d6356b6ea290d5abce35e1f693735cfc3c93ecafc0db" exitCode=0 Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.552678 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" event={"ID":"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39","Type":"ContainerDied","Data":"1afc1551d544cf2a7d08d6356b6ea290d5abce35e1f693735cfc3c93ecafc0db"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.557609 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" event={"ID":"441917a1-296e-4529-a79f-458faf4769e6","Type":"ContainerStarted","Data":"95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.561650 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.607936 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-cd8np" podStartSLOduration=7.607910211 podStartE2EDuration="7.607910211s" podCreationTimestamp="2026-02-23 09:09:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.569250259 +0000 UTC m=+122.647564646" watchObservedRunningTime="2026-02-23 09:09:46.607910211 +0000 UTC m=+122.686224598" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.609176 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.613481 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-nzcfx" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.621368 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g" event={"ID":"299c472a-f5bc-4330-a44e-82cb3490d9bd","Type":"ContainerStarted","Data":"3e545308d1f120efd4c1182129e34bf68c1a673b5b56e3d49bba87d973597687"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.621435 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g" event={"ID":"299c472a-f5bc-4330-a44e-82cb3490d9bd","Type":"ContainerStarted","Data":"29902baf9502b8069ee39986016e90d3e8878d89e2464eefd59a165593a143f7"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.635054 4834 generic.go:334] "Generic (PLEG): container finished" podID="ad9b3cbe-a60c-43af-92e7-fb757f59162b" containerID="a290e3e0d0313d130a7fdc07eb32aa993e9865d54052d6038cae12a15b63e72e" exitCode=0 Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.635251 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" event={"ID":"ad9b3cbe-a60c-43af-92e7-fb757f59162b","Type":"ContainerDied","Data":"a290e3e0d0313d130a7fdc07eb32aa993e9865d54052d6038cae12a15b63e72e"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.651128 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:46 crc kubenswrapper[4834]: E0223 09:09:46.655866 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:47.155843149 +0000 UTC m=+123.234157636 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.651269 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" podStartSLOduration=54.651244671 podStartE2EDuration="54.651244671s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.613224868 +0000 UTC m=+122.691539255" watchObservedRunningTime="2026-02-23 09:09:46.651244671 +0000 UTC m=+122.729559058" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.656893 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" podStartSLOduration=54.656874027 podStartE2EDuration="54.656874027s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.636664997 +0000 UTC m=+122.714979414" watchObservedRunningTime="2026-02-23 09:09:46.656874027 +0000 UTC m=+122.735188414" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.667653 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.668252 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" podUID="c628e572-63b9-478b-bf3a-6ff1966480a1" containerName="route-controller-manager" containerID="cri-o://196a2a8abb6ff0a4f63033b3c41b8123d65b9b382205cbd5519e5687f9403ef3" gracePeriod=30 Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.670955 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2wpmg" event={"ID":"5608eb40-e8c9-4701-85ee-68b1cbd4b79c","Type":"ContainerStarted","Data":"0a902795a65aaaa8ea1433c94afa851b3c505a51cbaf59ad033c840342d02c9c"} Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.680779 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.682330 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h" podStartSLOduration=55.682292571 podStartE2EDuration="55.682292571s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.672625154 +0000 UTC m=+122.750939541" watchObservedRunningTime="2026-02-23 09:09:46.682292571 +0000 UTC m=+122.760606958" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.688997 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-2r5bx" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.694960 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-8pjt4" podStartSLOduration=55.694939792 podStartE2EDuration="55.694939792s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.693685238 +0000 UTC m=+122.771999635" watchObservedRunningTime="2026-02-23 09:09:46.694939792 +0000 UTC m=+122.773254179" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.723900 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.751711 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-xzc7c" podStartSLOduration=55.751690164 podStartE2EDuration="55.751690164s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.746759308 +0000 UTC m=+122.825073695" watchObservedRunningTime="2026-02-23 09:09:46.751690164 +0000 UTC m=+122.830004551" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.758111 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:46 crc kubenswrapper[4834]: E0223 09:09:46.760138 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:47.260109958 +0000 UTC m=+123.338424355 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.783036 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" podStartSLOduration=7.783014252 podStartE2EDuration="7.783014252s" podCreationTimestamp="2026-02-23 09:09:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.780595466 +0000 UTC m=+122.858909853" watchObservedRunningTime="2026-02-23 09:09:46.783014252 +0000 UTC m=+122.861328639" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.820882 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-897nm" podStartSLOduration=55.820851671 podStartE2EDuration="55.820851671s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.820175793 +0000 UTC m=+122.898490180" watchObservedRunningTime="2026-02-23 09:09:46.820851671 +0000 UTC m=+122.899166058" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.859833 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-proxy-ca-bundles\") pod \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.859913 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-client-ca\") pod \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.859986 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-config\") pod \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.860016 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-serving-cert\") pod \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.860064 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sd288\" (UniqueName: \"kubernetes.io/projected/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-kube-api-access-sd288\") pod \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\" (UID: \"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39\") " Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.860750 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" 
(UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:46 crc kubenswrapper[4834]: E0223 09:09:46.861170 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:47.361153378 +0000 UTC m=+123.439467775 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.862584 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-config" (OuterVolumeSpecName: "config") pod "dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39" (UID: "dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.865687 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-client-ca" (OuterVolumeSpecName: "client-ca") pod "dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39" (UID: "dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.866229 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39" (UID: "dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.901628 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-kube-api-access-sd288" (OuterVolumeSpecName: "kube-api-access-sd288") pod "dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39" (UID: "dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39"). InnerVolumeSpecName "kube-api-access-sd288". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.909200 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39" (UID: "dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.968530 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.969010 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sd288\" (UniqueName: \"kubernetes.io/projected/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-kube-api-access-sd288\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.969036 4834 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.969051 4834 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-client-ca\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.969062 4834 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-config\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.969072 4834 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:46 crc kubenswrapper[4834]: E0223 09:09:46.969164 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:47.46913744 +0000 UTC m=+123.547451827 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.976086 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-c4657466b-wmdpr"] Feb 23 09:09:46 crc kubenswrapper[4834]: E0223 09:09:46.976381 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39" containerName="controller-manager" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.976417 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39" containerName="controller-manager" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.976541 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39" containerName="controller-manager" Feb 23 09:09:46 crc kubenswrapper[4834]: I0223 09:09:46.976986 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.001433 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-2wpmg" podStartSLOduration=55.001386974 podStartE2EDuration="55.001386974s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:46.9943785 +0000 UTC m=+123.072692887" watchObservedRunningTime="2026-02-23 09:09:47.001386974 +0000 UTC m=+123.079701371" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.002052 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-c4657466b-wmdpr"] Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.075925 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-config\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.076002 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb5fc6af-049b-4814-94f0-4a414d59a6ab-serving-cert\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.076031 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5sqx\" (UniqueName: \"kubernetes.io/projected/eb5fc6af-049b-4814-94f0-4a414d59a6ab-kube-api-access-q5sqx\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.076058 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.076085 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-proxy-ca-bundles\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.076121 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-client-ca\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: 
E0223 09:09:47.076907 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:47.576844355 +0000 UTC m=+123.655158742 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.133708 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-c7c5g" podStartSLOduration=55.13368034 podStartE2EDuration="55.13368034s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:47.094736071 +0000 UTC m=+123.173050458" watchObservedRunningTime="2026-02-23 09:09:47.13368034 +0000 UTC m=+123.211994727" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.178913 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.179125 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb5fc6af-049b-4814-94f0-4a414d59a6ab-serving-cert\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.179150 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5sqx\" (UniqueName: \"kubernetes.io/projected/eb5fc6af-049b-4814-94f0-4a414d59a6ab-kube-api-access-q5sqx\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.179190 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-proxy-ca-bundles\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.179223 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-client-ca\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.179247 4834 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-config\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.180532 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-config\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: E0223 09:09:47.180762 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:47.680704682 +0000 UTC m=+123.759019089 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.181959 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-client-ca\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.182890 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-proxy-ca-bundles\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.208583 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb5fc6af-049b-4814-94f0-4a414d59a6ab-serving-cert\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.234308 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5sqx\" (UniqueName: \"kubernetes.io/projected/eb5fc6af-049b-4814-94f0-4a414d59a6ab-kube-api-access-q5sqx\") pod \"controller-manager-c4657466b-wmdpr\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.270577 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-nzcfx"] Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.294890 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:47 crc kubenswrapper[4834]: E0223 09:09:47.295375 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:47.79536131 +0000 UTC m=+123.873675697 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.323341 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.397297 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:47 crc kubenswrapper[4834]: E0223 09:09:47.397710 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:47.897688826 +0000 UTC m=+123.976003213 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.480350 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.500665 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:47 crc kubenswrapper[4834]: E0223 09:09:47.501112 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2026-02-23 09:09:48.001088361 +0000 UTC m=+124.079402748 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.543354 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:47 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:47 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:47 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.543453 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.601728 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c628e572-63b9-478b-bf3a-6ff1966480a1-client-ca\") pod \"c628e572-63b9-478b-bf3a-6ff1966480a1\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.601812 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c628e572-63b9-478b-bf3a-6ff1966480a1-config\") pod \"c628e572-63b9-478b-bf3a-6ff1966480a1\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.601941 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.601969 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c628e572-63b9-478b-bf3a-6ff1966480a1-serving-cert\") pod \"c628e572-63b9-478b-bf3a-6ff1966480a1\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.601991 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p6pts\" (UniqueName: \"kubernetes.io/projected/c628e572-63b9-478b-bf3a-6ff1966480a1-kube-api-access-p6pts\") pod \"c628e572-63b9-478b-bf3a-6ff1966480a1\" (UID: \"c628e572-63b9-478b-bf3a-6ff1966480a1\") " Feb 23 09:09:47 crc kubenswrapper[4834]: E0223 09:09:47.604563 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-02-23 09:09:48.104540998 +0000 UTC m=+124.182855385 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.605325 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c628e572-63b9-478b-bf3a-6ff1966480a1-config" (OuterVolumeSpecName: "config") pod "c628e572-63b9-478b-bf3a-6ff1966480a1" (UID: "c628e572-63b9-478b-bf3a-6ff1966480a1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.605582 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c628e572-63b9-478b-bf3a-6ff1966480a1-client-ca" (OuterVolumeSpecName: "client-ca") pod "c628e572-63b9-478b-bf3a-6ff1966480a1" (UID: "c628e572-63b9-478b-bf3a-6ff1966480a1"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.620298 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c628e572-63b9-478b-bf3a-6ff1966480a1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c628e572-63b9-478b-bf3a-6ff1966480a1" (UID: "c628e572-63b9-478b-bf3a-6ff1966480a1"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.621925 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c628e572-63b9-478b-bf3a-6ff1966480a1-kube-api-access-p6pts" (OuterVolumeSpecName: "kube-api-access-p6pts") pod "c628e572-63b9-478b-bf3a-6ff1966480a1" (UID: "c628e572-63b9-478b-bf3a-6ff1966480a1"). InnerVolumeSpecName "kube-api-access-p6pts". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.708392 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.708499 4834 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c628e572-63b9-478b-bf3a-6ff1966480a1-config\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.708512 4834 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c628e572-63b9-478b-bf3a-6ff1966480a1-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.708522 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p6pts\" (UniqueName: \"kubernetes.io/projected/c628e572-63b9-478b-bf3a-6ff1966480a1-kube-api-access-p6pts\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.708532 4834 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c628e572-63b9-478b-bf3a-6ff1966480a1-client-ca\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:47 crc kubenswrapper[4834]: E0223 09:09:47.708776 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:48.208756945 +0000 UTC m=+124.287071332 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.765971 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" event={"ID":"2c9de897-17f3-4444-ad95-b5e07b40f6c8","Type":"ContainerStarted","Data":"7eb81f4583ac1003fb2dad925150d48185d53433020a43b32683e5bb137ddd49"} Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.767674 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.783684 4834 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-hlc9q container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.30:8080/healthz\": dial tcp 10.217.0.30:8080: connect: connection refused" start-of-body= Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.783756 4834 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" podUID="2c9de897-17f3-4444-ad95-b5e07b40f6c8" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.30:8080/healthz\": dial tcp 10.217.0.30:8080: connect: connection refused" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.785847 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.786060 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-8mk2m" event={"ID":"dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39","Type":"ContainerDied","Data":"201ac388bf828fc084f26790dd7d6491b37140b5c8abe2cac813754c7e04f1d8"} Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.786142 4834 scope.go:117] "RemoveContainer" containerID="1afc1551d544cf2a7d08d6356b6ea290d5abce35e1f693735cfc3c93ecafc0db" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.812703 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:47 crc kubenswrapper[4834]: E0223 09:09:47.813082 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:48.313061706 +0000 UTC m=+124.391376093 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.847810 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" event={"ID":"a00a2272-643a-4144-aa4c-ff2d40639e8c","Type":"ContainerStarted","Data":"e763e48ee7361d367a3d9b16bad0af7adfc0649fe0d54bd46ae868a63a03d8b2"} Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.860878 4834 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-9skpv container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.860944 4834 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" podUID="a00a2272-643a-4144-aa4c-ff2d40639e8c" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.870525 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" event={"ID":"8fba4843-d9f5-4156-8d0b-d0e764be8942","Type":"ContainerStarted","Data":"a41e7869fe6d514d1940fb22fc169bb864df6808eec1fb7c97b994d00a378b4d"} Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.870572 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" event={"ID":"8fba4843-d9f5-4156-8d0b-d0e764be8942","Type":"ContainerStarted","Data":"a7d6c14240e3d6851ce5e6f1f444b8c510ac4f974412521665f5815b49b18035"} Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.883155 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" podStartSLOduration=55.883130957 podStartE2EDuration="55.883130957s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:47.791515818 +0000 UTC m=+123.869830235" watchObservedRunningTime="2026-02-23 09:09:47.883130957 +0000 UTC m=+123.961445344" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.884684 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-8mk2m"] Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.886387 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-c9zqb" event={"ID":"2e300c80-7530-4d72-acd8-4bfdca03c327","Type":"ContainerStarted","Data":"e77a3655f4b608c750680dc5cf67d76311020a7b85d687100225f00056ef6385"} Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.886450 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-c9zqb" 
event={"ID":"2e300c80-7530-4d72-acd8-4bfdca03c327","Type":"ContainerStarted","Data":"d79a4c4489b0a9b90b60bf7cb6e2187358761a6410f8280d6fa6e654e01150f9"} Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.887332 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-8mk2m"] Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.887383 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-c9zqb" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.889057 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-c4657466b-wmdpr"] Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.901907 4834 generic.go:334] "Generic (PLEG): container finished" podID="c628e572-63b9-478b-bf3a-6ff1966480a1" containerID="196a2a8abb6ff0a4f63033b3c41b8123d65b9b382205cbd5519e5687f9403ef3" exitCode=0 Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.902034 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" event={"ID":"c628e572-63b9-478b-bf3a-6ff1966480a1","Type":"ContainerDied","Data":"196a2a8abb6ff0a4f63033b3c41b8123d65b9b382205cbd5519e5687f9403ef3"} Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.902014 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.902133 4834 scope.go:117] "RemoveContainer" containerID="196a2a8abb6ff0a4f63033b3c41b8123d65b9b382205cbd5519e5687f9403ef3" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.902115 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt" event={"ID":"c628e572-63b9-478b-bf3a-6ff1966480a1","Type":"ContainerDied","Data":"b01ece9bcb110803fd09b8ad8d157d66ef39c1a73c1b1fb63417a6b2aeefc63d"} Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.906707 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-5kkqj" podStartSLOduration=55.90668197 podStartE2EDuration="55.90668197s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:47.903422449 +0000 UTC m=+123.981736836" watchObservedRunningTime="2026-02-23 09:09:47.90668197 +0000 UTC m=+123.984996357" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.917519 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:47 crc kubenswrapper[4834]: E0223 09:09:47.919741 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:48.419706731 +0000 UTC m=+124.498021118 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:47 crc kubenswrapper[4834]: W0223 09:09:47.920746 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeb5fc6af_049b_4814_94f0_4a414d59a6ab.slice/crio-dc723cf1b0c8b6dc29f266a83a7e972cbf88685c5826ed659c1eeb690edfbe79 WatchSource:0}: Error finding container dc723cf1b0c8b6dc29f266a83a7e972cbf88685c5826ed659c1eeb690edfbe79: Status 404 returned error can't find the container with id dc723cf1b0c8b6dc29f266a83a7e972cbf88685c5826ed659c1eeb690edfbe79 Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.928078 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-nzcfx" event={"ID":"4ceb8401-3a07-422f-ae4d-14366611f4a6","Type":"ContainerStarted","Data":"83bb34b88b0fe9736929d127dfbfbfff7a2d11ad55c99bee3cf0616b3344111b"} Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.947264 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-c9zqb" podStartSLOduration=8.947248664 podStartE2EDuration="8.947248664s" podCreationTimestamp="2026-02-23 09:09:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:47.94494315 +0000 UTC m=+124.023257537" watchObservedRunningTime="2026-02-23 09:09:47.947248664 +0000 UTC m=+124.025563051" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.949988 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d" event={"ID":"9707583d-7687-4854-8501-eacac0abff04","Type":"ContainerStarted","Data":"1dfc6cafb66f6becfa592ac24b435c814dd9b91c092442038a24526c74973707"} Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.951153 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.960234 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-rlmdc"] Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.975071 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt"] Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.975781 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2" event={"ID":"59d59459-2ab3-40d8-9cc4-f68e34377748","Type":"ContainerStarted","Data":"a040faa8eb58487583f268d67e82da6a75bfee4bc0a85c3035d0bd33935637a5"} Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.979241 4834 scope.go:117] "RemoveContainer" containerID="196a2a8abb6ff0a4f63033b3c41b8123d65b9b382205cbd5519e5687f9403ef3" Feb 23 09:09:47 crc kubenswrapper[4834]: E0223 09:09:47.982428 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"196a2a8abb6ff0a4f63033b3c41b8123d65b9b382205cbd5519e5687f9403ef3\": container with ID starting with 196a2a8abb6ff0a4f63033b3c41b8123d65b9b382205cbd5519e5687f9403ef3 not found: ID does not exist" containerID="196a2a8abb6ff0a4f63033b3c41b8123d65b9b382205cbd5519e5687f9403ef3" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.982507 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"196a2a8abb6ff0a4f63033b3c41b8123d65b9b382205cbd5519e5687f9403ef3"} err="failed to get container status \"196a2a8abb6ff0a4f63033b3c41b8123d65b9b382205cbd5519e5687f9403ef3\": rpc error: code = NotFound desc = could not find container \"196a2a8abb6ff0a4f63033b3c41b8123d65b9b382205cbd5519e5687f9403ef3\": container with ID starting with 196a2a8abb6ff0a4f63033b3c41b8123d65b9b382205cbd5519e5687f9403ef3 not found: ID does not exist" Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.983379 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-xslpt"] Feb 23 09:09:47 crc kubenswrapper[4834]: I0223 09:09:47.986717 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" event={"ID":"59fb1571-215c-49da-a6ae-3c2152ef19f6","Type":"ContainerStarted","Data":"e304c7395613763c3888a1e1e9599f5db91ac5d7ef1d0289be521279e6cc2b3b"} Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.014550 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d" podStartSLOduration=56.014528338 podStartE2EDuration="56.014528338s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:47.995749848 +0000 UTC m=+124.074064245" watchObservedRunningTime="2026-02-23 09:09:48.014528338 +0000 UTC m=+124.092842725" Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.015109 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-ljtgg" event={"ID":"6b454f53-4bc6-4e49-a80d-2f31fe1dccb9","Type":"ContainerStarted","Data":"b294a21df1e9f30ff58543f26232ae2680b0a5b358422b4db4bbda2d701f7ba4"} Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.026607 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:48 crc kubenswrapper[4834]: E0223 09:09:48.028334 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:48.5283091 +0000 UTC m=+124.606623507 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.041384 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-495q2" podStartSLOduration=57.041362642 podStartE2EDuration="57.041362642s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:48.036854287 +0000 UTC m=+124.115168674" watchObservedRunningTime="2026-02-23 09:09:48.041362642 +0000 UTC m=+124.119677039" Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.041701 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-jfn2h" event={"ID":"2ba759e0-a281-467d-aa5a-7ac7d97c67fc","Type":"ContainerStarted","Data":"9a795dcaeea74fc424311c9fbded1ffb848c9a3b26c84d48213a8f656acf096e"} Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.050655 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" event={"ID":"0829b4ef-0b9e-49c6-ba38-daafdd8332ce","Type":"ContainerStarted","Data":"53ec3d4b3e06c8dfb04f3f7a9e0e5b928f88def5cccf0505e9c619cc010c6ec4"} Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.050705 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" event={"ID":"0829b4ef-0b9e-49c6-ba38-daafdd8332ce","Type":"ContainerStarted","Data":"638d08b85974a16074c4f2fc9fe659b966fc6f264318691ce84bc9588b77892d"} Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.065610 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-ljtgg" podStartSLOduration=9.065588623 podStartE2EDuration="9.065588623s" podCreationTimestamp="2026-02-23 09:09:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:48.0643907 +0000 UTC m=+124.142705087" watchObservedRunningTime="2026-02-23 09:09:48.065588623 +0000 UTC m=+124.143903010" Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.068933 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-m8kxw" event={"ID":"42d36f07-e27c-49e0-bad8-472e93d3875d","Type":"ContainerStarted","Data":"74dfe0a68b0b0b0b5c7fb5bcd38d348f3d57a3a54d4fe75e69381425d0138828"} Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.093524 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" event={"ID":"ad9b3cbe-a60c-43af-92e7-fb757f59162b","Type":"ContainerStarted","Data":"17f15f0ee67e4309e230c2ecc15e9518219b971e2d432f3e24cee9d30abbe80a"} Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.093672 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.117103 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" event={"ID":"83a0c203-a9b8-462a-ba20-536641aa0721","Type":"ContainerStarted","Data":"596fcb3cf22ce52eed49904ca9b708527613c7459d8cc66043df0c0c5ba966d1"} Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.117158 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.131796 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:48 crc kubenswrapper[4834]: E0223 09:09:48.132243 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:48.63222429 +0000 UTC m=+124.710538677 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.134665 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rmcx" podStartSLOduration=56.134641457 podStartE2EDuration="56.134641457s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:48.092851009 +0000 UTC m=+124.171165396" watchObservedRunningTime="2026-02-23 09:09:48.134641457 +0000 UTC m=+124.212955844" Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.167804 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-r2npz" Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.200597 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-m8kxw" podStartSLOduration=56.200576213 podStartE2EDuration="56.200576213s" podCreationTimestamp="2026-02-23 09:08:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:48.142657969 +0000 UTC m=+124.220972356" watchObservedRunningTime="2026-02-23 09:09:48.200576213 +0000 UTC m=+124.278890600" Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.201612 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" podStartSLOduration=57.201605922 
podStartE2EDuration="57.201605922s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:48.199604027 +0000 UTC m=+124.277918404" watchObservedRunningTime="2026-02-23 09:09:48.201605922 +0000 UTC m=+124.279920299" Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.234003 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:48 crc kubenswrapper[4834]: E0223 09:09:48.236457 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:48.736431767 +0000 UTC m=+124.814746164 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.346623 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:48 crc kubenswrapper[4834]: E0223 09:09:48.347543 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:48.847527016 +0000 UTC m=+124.925841403 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.448235 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:48 crc kubenswrapper[4834]: E0223 09:09:48.448874 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-02-23 09:09:48.948854384 +0000 UTC m=+125.027168771 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.550372 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:48 crc kubenswrapper[4834]: E0223 09:09:48.550794 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:49.050775407 +0000 UTC m=+125.129089794 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.558018 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:48 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:48 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:48 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.558090 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.637120 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c628e572-63b9-478b-bf3a-6ff1966480a1" path="/var/lib/kubelet/pods/c628e572-63b9-478b-bf3a-6ff1966480a1/volumes" Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.637938 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39" path="/var/lib/kubelet/pods/dcbde5e8-97ff-42f6-85a9-9fb6f2ceee39/volumes" Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.651409 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") 
" Feb 23 09:09:48 crc kubenswrapper[4834]: E0223 09:09:48.651731 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:49.151712464 +0000 UTC m=+125.230026841 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.762111 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:48 crc kubenswrapper[4834]: E0223 09:09:48.762572 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:49.262552466 +0000 UTC m=+125.340866853 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.878586 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:48 crc kubenswrapper[4834]: E0223 09:09:48.878854 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:49.378820839 +0000 UTC m=+125.457135226 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.879016 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:48 crc kubenswrapper[4834]: E0223 09:09:48.879477 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:49.379462396 +0000 UTC m=+125.457776783 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.931996 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-fg4xb" Feb 23 09:09:48 crc kubenswrapper[4834]: I0223 09:09:48.980326 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:48 crc kubenswrapper[4834]: E0223 09:09:48.980754 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:49.480730712 +0000 UTC m=+125.559045099 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.104988 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:49 crc kubenswrapper[4834]: E0223 09:09:49.105635 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:49.605621773 +0000 UTC m=+125.683936160 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.224126 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:49 crc kubenswrapper[4834]: E0223 09:09:49.224505 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:49.724486607 +0000 UTC m=+125.802800994 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.370839 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:49 crc kubenswrapper[4834]: E0223 09:09:49.371179 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:49.871166351 +0000 UTC m=+125.949480738 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.491274 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:49 crc kubenswrapper[4834]: E0223 09:09:49.491460 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:49.991387433 +0000 UTC m=+126.069701820 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.491595 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:49 crc kubenswrapper[4834]: E0223 09:09:49.491968 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:49.991951228 +0000 UTC m=+126.070265615 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.593746 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:49 crc kubenswrapper[4834]: E0223 09:09:49.594241 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:50.094224323 +0000 UTC m=+126.172538710 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.594338 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-nzcfx" event={"ID":"4ceb8401-3a07-422f-ae4d-14366611f4a6","Type":"ContainerStarted","Data":"54cdc4fe47cfaaf769cc9baa490e40f6ac93eca601031c838a29592368b81c5d"} Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.602733 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn"] Feb 23 09:09:49 crc kubenswrapper[4834]: E0223 09:09:49.602941 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c628e572-63b9-478b-bf3a-6ff1966480a1" containerName="route-controller-manager" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.602956 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="c628e572-63b9-478b-bf3a-6ff1966480a1" containerName="route-controller-manager" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.603034 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="c628e572-63b9-478b-bf3a-6ff1966480a1" containerName="route-controller-manager" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.603442 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.627528 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:49 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:49 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:49 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.627587 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.663775 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-m8kxw" event={"ID":"42d36f07-e27c-49e0-bad8-472e93d3875d","Type":"ContainerStarted","Data":"49a636b37e6c5286429b7fe78f814e036733d2e64e20050d1e4a1d903aca1c01"} Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.739084 4834 generic.go:334] "Generic (PLEG): container finished" podID="59fb1571-215c-49da-a6ae-3c2152ef19f6" containerID="e304c7395613763c3888a1e1e9599f5db91ac5d7ef1d0289be521279e6cc2b3b" exitCode=0 Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.739228 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" 
event={"ID":"59fb1571-215c-49da-a6ae-3c2152ef19f6","Type":"ContainerDied","Data":"e304c7395613763c3888a1e1e9599f5db91ac5d7ef1d0289be521279e6cc2b3b"} Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.752233 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:49 crc kubenswrapper[4834]: E0223 09:09:49.752611 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:50.252597101 +0000 UTC m=+126.330911488 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.786308 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.786469 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.786565 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.786770 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.786876 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.787050 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.787276 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn"] Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.790527 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" event={"ID":"eb5fc6af-049b-4814-94f0-4a414d59a6ab","Type":"ContainerStarted","Data":"1ba8c1f689c95ea9d67f4102e113d62896b84314d491998bc9dcf0c251a0277a"} Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.790561 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" event={"ID":"eb5fc6af-049b-4814-94f0-4a414d59a6ab","Type":"ContainerStarted","Data":"dc723cf1b0c8b6dc29f266a83a7e972cbf88685c5826ed659c1eeb690edfbe79"} Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.791094 4834 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.794811 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" podUID="441917a1-296e-4529-a79f-458faf4769e6" containerName="kube-multus-additional-cni-plugins" containerID="cri-o://95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" gracePeriod=30 Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.800603 4834 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-lzrvp container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" start-of-body= Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.800664 4834 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" podUID="ad9b3cbe-a60c-43af-92e7-fb757f59162b" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.13:8443/healthz\": dial tcp 10.217.0.13:8443: connect: connection refused" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.849312 4834 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-hlc9q container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.30:8080/healthz\": dial tcp 10.217.0.30:8080: connect: connection refused" start-of-body= Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.849372 4834 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" podUID="2c9de897-17f3-4444-ad95-b5e07b40f6c8" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.30:8080/healthz\": dial tcp 10.217.0.30:8080: connect: connection refused" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.853128 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.853359 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xdh7\" (UniqueName: \"kubernetes.io/projected/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-kube-api-access-8xdh7\") pod \"route-controller-manager-bc8b6d86d-zcksn\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.853420 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-config\") pod \"route-controller-manager-bc8b6d86d-zcksn\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.853449 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-client-ca\") pod \"route-controller-manager-bc8b6d86d-zcksn\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.853504 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-serving-cert\") pod \"route-controller-manager-bc8b6d86d-zcksn\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:49 crc kubenswrapper[4834]: E0223 09:09:49.867163 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:50.367132384 +0000 UTC m=+126.445446771 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.870511 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-nzcfx" podStartSLOduration=58.870492568 podStartE2EDuration="58.870492568s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:49.869793328 +0000 UTC m=+125.948107715" watchObservedRunningTime="2026-02-23 09:09:49.870492568 +0000 UTC m=+125.948806955" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.880784 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-9skpv" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.955291 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-serving-cert\") pod \"route-controller-manager-bc8b6d86d-zcksn\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.957846 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xdh7\" (UniqueName: \"kubernetes.io/projected/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-kube-api-access-8xdh7\") pod \"route-controller-manager-bc8b6d86d-zcksn\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.957881 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod 
\"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.957888 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.957913 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-config\") pod \"route-controller-manager-bc8b6d86d-zcksn\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.957953 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-client-ca\") pod \"route-controller-manager-bc8b6d86d-zcksn\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.959564 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-client-ca\") pod \"route-controller-manager-bc8b6d86d-zcksn\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:49 crc kubenswrapper[4834]: E0223 09:09:49.962659 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:50.462624061 +0000 UTC m=+126.540938448 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:49 crc kubenswrapper[4834]: I0223 09:09:49.964234 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-config\") pod \"route-controller-manager-bc8b6d86d-zcksn\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.037776 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-serving-cert\") pod \"route-controller-manager-bc8b6d86d-zcksn\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.041451 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xdh7\" (UniqueName: \"kubernetes.io/projected/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-kube-api-access-8xdh7\") pod \"route-controller-manager-bc8b6d86d-zcksn\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.144744 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:50 crc kubenswrapper[4834]: E0223 09:09:50.145593 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:50.645566831 +0000 UTC m=+126.723881218 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.146681 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jw46p"] Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.146960 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.149155 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.151978 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.157997 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jw46p"] Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.210238 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5vvxt"] Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.211663 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.229330 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5vvxt"] Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.249691 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7l2f5\" (UniqueName: \"kubernetes.io/projected/19479380-b603-400a-99e9-6b8186f42f33-kube-api-access-7l2f5\") pod \"certified-operators-5vvxt\" (UID: \"19479380-b603-400a-99e9-6b8186f42f33\") " pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.249757 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67be3aab-67ec-42d2-9158-efe9b6ee13e7-catalog-content\") pod \"community-operators-jw46p\" (UID: \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\") " pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.249803 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19479380-b603-400a-99e9-6b8186f42f33-catalog-content\") pod \"certified-operators-5vvxt\" (UID: \"19479380-b603-400a-99e9-6b8186f42f33\") " pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.249824 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67be3aab-67ec-42d2-9158-efe9b6ee13e7-utilities\") pod \"community-operators-jw46p\" (UID: \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\") " pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.249867 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19479380-b603-400a-99e9-6b8186f42f33-utilities\") pod \"certified-operators-5vvxt\" (UID: \"19479380-b603-400a-99e9-6b8186f42f33\") " pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.249894 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89srs\" (UniqueName: \"kubernetes.io/projected/67be3aab-67ec-42d2-9158-efe9b6ee13e7-kube-api-access-89srs\") pod \"community-operators-jw46p\" (UID: \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\") " pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.249917 4834 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:50 crc kubenswrapper[4834]: E0223 09:09:50.250219 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:50.75020622 +0000 UTC m=+126.828520607 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.317643 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.350713 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.350826 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67be3aab-67ec-42d2-9158-efe9b6ee13e7-catalog-content\") pod \"community-operators-jw46p\" (UID: \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\") " pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.350888 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19479380-b603-400a-99e9-6b8186f42f33-catalog-content\") pod \"certified-operators-5vvxt\" (UID: \"19479380-b603-400a-99e9-6b8186f42f33\") " pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.350920 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67be3aab-67ec-42d2-9158-efe9b6ee13e7-utilities\") pod \"community-operators-jw46p\" (UID: \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\") " pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.350976 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19479380-b603-400a-99e9-6b8186f42f33-utilities\") pod \"certified-operators-5vvxt\" (UID: \"19479380-b603-400a-99e9-6b8186f42f33\") " pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.351012 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89srs\" (UniqueName: 
\"kubernetes.io/projected/67be3aab-67ec-42d2-9158-efe9b6ee13e7-kube-api-access-89srs\") pod \"community-operators-jw46p\" (UID: \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\") " pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.351053 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7l2f5\" (UniqueName: \"kubernetes.io/projected/19479380-b603-400a-99e9-6b8186f42f33-kube-api-access-7l2f5\") pod \"certified-operators-5vvxt\" (UID: \"19479380-b603-400a-99e9-6b8186f42f33\") " pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:09:50 crc kubenswrapper[4834]: E0223 09:09:50.351561 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:50.851539648 +0000 UTC m=+126.929854035 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.351954 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67be3aab-67ec-42d2-9158-efe9b6ee13e7-catalog-content\") pod \"community-operators-jw46p\" (UID: \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\") " pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.352433 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19479380-b603-400a-99e9-6b8186f42f33-catalog-content\") pod \"certified-operators-5vvxt\" (UID: \"19479380-b603-400a-99e9-6b8186f42f33\") " pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.352691 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67be3aab-67ec-42d2-9158-efe9b6ee13e7-utilities\") pod \"community-operators-jw46p\" (UID: \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\") " pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.352932 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19479380-b603-400a-99e9-6b8186f42f33-utilities\") pod \"certified-operators-5vvxt\" (UID: \"19479380-b603-400a-99e9-6b8186f42f33\") " pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.437021 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" podStartSLOduration=5.437002566 podStartE2EDuration="5.437002566s" podCreationTimestamp="2026-02-23 09:09:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:50.268019243 +0000 UTC m=+126.346333630" 
watchObservedRunningTime="2026-02-23 09:09:50.437002566 +0000 UTC m=+126.515316953" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.453832 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:50 crc kubenswrapper[4834]: E0223 09:09:50.454212 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:50.954198723 +0000 UTC m=+127.032513110 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.473452 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.473606 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.475790 4834 patch_prober.go:28] interesting pod/console-f9d7485db-g9jpx container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.29:8443/health\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.475860 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-g9jpx" podUID="24eb6775-1135-4cc7-9e62-103e142f285a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.29:8443/health\": dial tcp 10.217.0.29:8443: connect: connection refused" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.494193 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7l2f5\" (UniqueName: \"kubernetes.io/projected/19479380-b603-400a-99e9-6b8186f42f33-kube-api-access-7l2f5\") pod \"certified-operators-5vvxt\" (UID: \"19479380-b603-400a-99e9-6b8186f42f33\") " pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.534547 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89srs\" (UniqueName: \"kubernetes.io/projected/67be3aab-67ec-42d2-9158-efe9b6ee13e7-kube-api-access-89srs\") pod \"community-operators-jw46p\" (UID: \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\") " pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.560947 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:50 crc 
kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:50 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:50 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.561107 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.564559 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-kk9xp"] Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.564836 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:50 crc kubenswrapper[4834]: E0223 09:09:50.566623 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:51.066603147 +0000 UTC m=+127.144917534 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.569280 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.570873 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kk9xp"] Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.571018 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.644736 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2qm7f"] Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.645682 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2qm7f"] Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.645773 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.706873 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:50 crc kubenswrapper[4834]: E0223 09:09:50.707509 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:51.207488101 +0000 UTC m=+127.285802488 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.719925 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.720283 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.757779 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.779509 4834 patch_prober.go:28] interesting pod/apiserver-76f77b778f-897nm container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Feb 23 09:09:50 crc kubenswrapper[4834]: [+]log ok Feb 23 09:09:50 crc kubenswrapper[4834]: [+]etcd ok Feb 23 09:09:50 crc kubenswrapper[4834]: [+]poststarthook/start-apiserver-admission-initializer ok Feb 23 09:09:50 crc kubenswrapper[4834]: [+]poststarthook/generic-apiserver-start-informers ok Feb 23 09:09:50 crc kubenswrapper[4834]: [+]poststarthook/max-in-flight-filter ok Feb 23 09:09:50 crc kubenswrapper[4834]: [+]poststarthook/storage-object-count-tracker-hook ok Feb 23 09:09:50 crc kubenswrapper[4834]: [+]poststarthook/image.openshift.io-apiserver-caches ok Feb 23 09:09:50 crc kubenswrapper[4834]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Feb 23 09:09:50 crc kubenswrapper[4834]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Feb 23 09:09:50 crc kubenswrapper[4834]: [+]poststarthook/project.openshift.io-projectcache ok Feb 23 09:09:50 crc kubenswrapper[4834]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Feb 23 09:09:50 crc kubenswrapper[4834]: [+]poststarthook/openshift.io-startinformers ok Feb 23 09:09:50 crc kubenswrapper[4834]: [+]poststarthook/openshift.io-restmapperupdater ok Feb 23 09:09:50 crc kubenswrapper[4834]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Feb 23 09:09:50 crc kubenswrapper[4834]: livez check failed Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.780115 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-897nm" podUID="782436e6-7d7f-4e44-afe6-542014b15e86" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.815338 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.815644 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ad50479-c17b-4e80-b57a-ef039e81c612-catalog-content\") pod \"community-operators-kk9xp\" (UID: \"4ad50479-c17b-4e80-b57a-ef039e81c612\") " pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.815720 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-catalog-content\") pod \"certified-operators-2qm7f\" (UID: \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\") " pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.815761 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zfb2q\" (UniqueName: 
\"kubernetes.io/projected/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-kube-api-access-zfb2q\") pod \"certified-operators-2qm7f\" (UID: \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\") " pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.815785 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-utilities\") pod \"certified-operators-2qm7f\" (UID: \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\") " pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.815821 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ad50479-c17b-4e80-b57a-ef039e81c612-utilities\") pod \"community-operators-kk9xp\" (UID: \"4ad50479-c17b-4e80-b57a-ef039e81c612\") " pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.815846 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ktfgk\" (UniqueName: \"kubernetes.io/projected/4ad50479-c17b-4e80-b57a-ef039e81c612-kube-api-access-ktfgk\") pod \"community-operators-kk9xp\" (UID: \"4ad50479-c17b-4e80-b57a-ef039e81c612\") " pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:09:50 crc kubenswrapper[4834]: E0223 09:09:50.815971 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:51.315950357 +0000 UTC m=+127.394264754 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.825891 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-nzcfx" event={"ID":"4ceb8401-3a07-422f-ae4d-14366611f4a6","Type":"ContainerStarted","Data":"627a3a40b6ad75f720b92cb1bf9ab137c8f43901a6b8ab77bbdf869010cdbc50"} Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.915335 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" event={"ID":"83a0c203-a9b8-462a-ba20-536641aa0721","Type":"ContainerStarted","Data":"44221eed4a50726145c597de354a3452448be59f6bec0ee7b9d77ac2829c942e"} Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.920167 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-catalog-content\") pod \"certified-operators-2qm7f\" (UID: \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\") " pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.920242 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zfb2q\" (UniqueName: \"kubernetes.io/projected/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-kube-api-access-zfb2q\") pod \"certified-operators-2qm7f\" (UID: \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\") " pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.920280 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-utilities\") pod \"certified-operators-2qm7f\" (UID: \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\") " pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.920351 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ad50479-c17b-4e80-b57a-ef039e81c612-utilities\") pod \"community-operators-kk9xp\" (UID: \"4ad50479-c17b-4e80-b57a-ef039e81c612\") " pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.920649 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ktfgk\" (UniqueName: \"kubernetes.io/projected/4ad50479-c17b-4e80-b57a-ef039e81c612-kube-api-access-ktfgk\") pod \"community-operators-kk9xp\" (UID: \"4ad50479-c17b-4e80-b57a-ef039e81c612\") " pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.920733 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:50 crc 
kubenswrapper[4834]: I0223 09:09:50.920802 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ad50479-c17b-4e80-b57a-ef039e81c612-catalog-content\") pod \"community-operators-kk9xp\" (UID: \"4ad50479-c17b-4e80-b57a-ef039e81c612\") " pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.923183 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ad50479-c17b-4e80-b57a-ef039e81c612-utilities\") pod \"community-operators-kk9xp\" (UID: \"4ad50479-c17b-4e80-b57a-ef039e81c612\") " pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.925441 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-catalog-content\") pod \"certified-operators-2qm7f\" (UID: \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\") " pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.928123 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-utilities\") pod \"certified-operators-2qm7f\" (UID: \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\") " pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:09:50 crc kubenswrapper[4834]: E0223 09:09:50.928347 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:51.428330511 +0000 UTC m=+127.506644898 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.928930 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ad50479-c17b-4e80-b57a-ef039e81c612-catalog-content\") pod \"community-operators-kk9xp\" (UID: \"4ad50479-c17b-4e80-b57a-ef039e81c612\") " pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.964558 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ktfgk\" (UniqueName: \"kubernetes.io/projected/4ad50479-c17b-4e80-b57a-ef039e81c612-kube-api-access-ktfgk\") pod \"community-operators-kk9xp\" (UID: \"4ad50479-c17b-4e80-b57a-ef039e81c612\") " pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.969043 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zfb2q\" (UniqueName: \"kubernetes.io/projected/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-kube-api-access-zfb2q\") pod \"certified-operators-2qm7f\" (UID: \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\") " pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:09:50 crc kubenswrapper[4834]: I0223 09:09:50.988709 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.031793 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:51 crc kubenswrapper[4834]: E0223 09:09:51.032904 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:51.532882788 +0000 UTC m=+127.611197175 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.037016 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.139375 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:51 crc kubenswrapper[4834]: E0223 09:09:51.147534 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:51.647500484 +0000 UTC m=+127.725814861 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.219577 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.220448 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.224433 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.224651 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.231089 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.242945 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.243139 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1e26514e-012d-41fc-b0a3-b7683bcf7b61-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"1e26514e-012d-41fc-b0a3-b7683bcf7b61\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.243239 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1e26514e-012d-41fc-b0a3-b7683bcf7b61-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"1e26514e-012d-41fc-b0a3-b7683bcf7b61\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 23 09:09:51 crc 
kubenswrapper[4834]: E0223 09:09:51.243342 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:51.74332261 +0000 UTC m=+127.821636997 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.246855 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.329975 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn"] Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.344233 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.344360 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1e26514e-012d-41fc-b0a3-b7683bcf7b61-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"1e26514e-012d-41fc-b0a3-b7683bcf7b61\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.344433 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1e26514e-012d-41fc-b0a3-b7683bcf7b61-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"1e26514e-012d-41fc-b0a3-b7683bcf7b61\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 23 09:09:51 crc kubenswrapper[4834]: E0223 09:09:51.345182 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:51.845167502 +0000 UTC m=+127.923481889 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.345371 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1e26514e-012d-41fc-b0a3-b7683bcf7b61-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"1e26514e-012d-41fc-b0a3-b7683bcf7b61\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.386100 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1e26514e-012d-41fc-b0a3-b7683bcf7b61-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"1e26514e-012d-41fc-b0a3-b7683bcf7b61\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.409088 4834 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.456197 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:51 crc kubenswrapper[4834]: E0223 09:09:51.456808 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:51.956782135 +0000 UTC m=+128.035096522 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.465607 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-lzrvp" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.491699 4834 patch_prober.go:28] interesting pod/downloads-7954f5f757-mxsss container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.491795 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-mxsss" podUID="fbca2b49-c933-408c-9c80-fb1202bfb6f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.492348 4834 patch_prober.go:28] interesting pod/downloads-7954f5f757-mxsss container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.492369 4834 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mxsss" podUID="fbca2b49-c933-408c-9c80-fb1202bfb6f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.545697 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.554584 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:51 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:51 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:51 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.554649 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.557870 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:51 crc 
kubenswrapper[4834]: E0223 09:09:51.558375 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:52.058358399 +0000 UTC m=+128.136672786 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.574649 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.612461 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5vvxt"] Feb 23 09:09:51 crc kubenswrapper[4834]: W0223 09:09:51.648566 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod19479380_b603_400a_99e9_6b8186f42f33.slice/crio-8b036bc0faf2489dd8e6c11c8acdf8c83c992b630d221efb50492e1d3c453ba9 WatchSource:0}: Error finding container 8b036bc0faf2489dd8e6c11c8acdf8c83c992b630d221efb50492e1d3c453ba9: Status 404 returned error can't find the container with id 8b036bc0faf2489dd8e6c11c8acdf8c83c992b630d221efb50492e1d3c453ba9 Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.658765 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.660038 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" Feb 23 09:09:51 crc kubenswrapper[4834]: E0223 09:09:51.660281 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-02-23 09:09:52.160256312 +0000 UTC m=+128.238570769 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.719665 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jw46p"] Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.722383 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2qm7f"] Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.760436 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5kdz\" (UniqueName: \"kubernetes.io/projected/59fb1571-215c-49da-a6ae-3c2152ef19f6-kube-api-access-q5kdz\") pod \"59fb1571-215c-49da-a6ae-3c2152ef19f6\" (UID: \"59fb1571-215c-49da-a6ae-3c2152ef19f6\") " Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.760510 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/59fb1571-215c-49da-a6ae-3c2152ef19f6-secret-volume\") pod \"59fb1571-215c-49da-a6ae-3c2152ef19f6\" (UID: \"59fb1571-215c-49da-a6ae-3c2152ef19f6\") " Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.760587 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/59fb1571-215c-49da-a6ae-3c2152ef19f6-config-volume\") pod \"59fb1571-215c-49da-a6ae-3c2152ef19f6\" (UID: \"59fb1571-215c-49da-a6ae-3c2152ef19f6\") " Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.761650 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/59fb1571-215c-49da-a6ae-3c2152ef19f6-config-volume" (OuterVolumeSpecName: "config-volume") pod "59fb1571-215c-49da-a6ae-3c2152ef19f6" (UID: "59fb1571-215c-49da-a6ae-3c2152ef19f6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.762196 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:51 crc kubenswrapper[4834]: E0223 09:09:51.762549 4834 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-02-23 09:09:52.262537457 +0000 UTC m=+128.340851844 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-rcwhk" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.762709 4834 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/59fb1571-215c-49da-a6ae-3c2152ef19f6-config-volume\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.777120 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/59fb1571-215c-49da-a6ae-3c2152ef19f6-kube-api-access-q5kdz" (OuterVolumeSpecName: "kube-api-access-q5kdz") pod "59fb1571-215c-49da-a6ae-3c2152ef19f6" (UID: "59fb1571-215c-49da-a6ae-3c2152ef19f6"). InnerVolumeSpecName "kube-api-access-q5kdz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.780015 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/59fb1571-215c-49da-a6ae-3c2152ef19f6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "59fb1571-215c-49da-a6ae-3c2152ef19f6" (UID: "59fb1571-215c-49da-a6ae-3c2152ef19f6"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.824471 4834 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-02-23T09:09:51.409121654Z","Handler":null,"Name":""} Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.831383 4834 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.831449 4834 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.843987 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-kk9xp"] Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.863927 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.864784 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5kdz\" (UniqueName: \"kubernetes.io/projected/59fb1571-215c-49da-a6ae-3c2152ef19f6-kube-api-access-q5kdz\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.864806 4834 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/59fb1571-215c-49da-a6ae-3c2152ef19f6-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:51 crc 
kubenswrapper[4834]: I0223 09:09:51.869516 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.921571 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.952382 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" event={"ID":"59fb1571-215c-49da-a6ae-3c2152ef19f6","Type":"ContainerDied","Data":"dd43f72b5c79682ab59e65203268340cae860417b1a42a1fa7190a5030324c1a"} Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.952448 4834 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd43f72b5c79682ab59e65203268340cae860417b1a42a1fa7190a5030324c1a" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.952525 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29530620-fvlp9" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.958644 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" event={"ID":"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82","Type":"ContainerStarted","Data":"ebfc8e187bca5f04d76de2f6e83a3b597e682288b2fbd0ded152f1bc7c47943b"} Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.958706 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" event={"ID":"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82","Type":"ContainerStarted","Data":"e8e60d84d15a5155c3776f61cdbe18143dd9f0588773927af5c5f9e7551a36cf"} Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.959023 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.968896 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kk9xp" event={"ID":"4ad50479-c17b-4e80-b57a-ef039e81c612","Type":"ContainerStarted","Data":"242c1377008eec11bb58cf77c93fef8e8fea62a361a086042e005dc9fd173fa3"} Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.970502 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.975268 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2qm7f" event={"ID":"f51b8d1c-1783-401f-b9b7-aef3b5bbab05","Type":"ContainerStarted","Data":"14579291ace62cc943e0018e2e687fc3d35611b23b0204154c303be72fde7126"} Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.978820 4834 csi_attacher.go:380] 
kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.978878 4834 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.987628 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jw46p" event={"ID":"67be3aab-67ec-42d2-9158-efe9b6ee13e7","Type":"ContainerStarted","Data":"1e548dee2426db2f2a28dd4bcfe67049a304951a507201c8a78c44dbc83b8fcc"} Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.995491 4834 generic.go:334] "Generic (PLEG): container finished" podID="19479380-b603-400a-99e9-6b8186f42f33" containerID="de41d4bc6c3994a8cc9bee90e631ff724658bd9ee4dd20239a95081e3acc547e" exitCode=0 Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.995573 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5vvxt" event={"ID":"19479380-b603-400a-99e9-6b8186f42f33","Type":"ContainerDied","Data":"de41d4bc6c3994a8cc9bee90e631ff724658bd9ee4dd20239a95081e3acc547e"} Feb 23 09:09:51 crc kubenswrapper[4834]: I0223 09:09:51.995606 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5vvxt" event={"ID":"19479380-b603-400a-99e9-6b8186f42f33","Type":"ContainerStarted","Data":"8b036bc0faf2489dd8e6c11c8acdf8c83c992b630d221efb50492e1d3c453ba9"} Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.000810 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" podStartSLOduration=7.000784109 podStartE2EDuration="7.000784109s" podCreationTimestamp="2026-02-23 09:09:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:51.996882201 +0000 UTC m=+128.075196588" watchObservedRunningTime="2026-02-23 09:09:52.000784109 +0000 UTC m=+128.079098496" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.012266 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" event={"ID":"83a0c203-a9b8-462a-ba20-536641aa0721","Type":"ContainerStarted","Data":"e3bae77e2d01adb929da245de784184e2d2e18bae80a457cf25b0e0a511afa07"} Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.012985 4834 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.062643 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.063364 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") 
pod \"image-registry-697d97f7c8-rcwhk\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.102942 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" podStartSLOduration=13.102923999 podStartE2EDuration="13.102923999s" podCreationTimestamp="2026-02-23 09:09:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:52.073026801 +0000 UTC m=+128.151341188" watchObservedRunningTime="2026-02-23 09:09:52.102923999 +0000 UTC m=+128.181238376" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.202854 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7vzgv"] Feb 23 09:09:52 crc kubenswrapper[4834]: E0223 09:09:52.203620 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="59fb1571-215c-49da-a6ae-3c2152ef19f6" containerName="collect-profiles" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.203642 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="59fb1571-215c-49da-a6ae-3c2152ef19f6" containerName="collect-profiles" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.203751 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="59fb1571-215c-49da-a6ae-3c2152ef19f6" containerName="collect-profiles" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.204490 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.207065 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.221000 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7vzgv"] Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.351201 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.359058 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.397299 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d37845a1-60c4-4708-b671-42d20f6a9b34-catalog-content\") pod \"redhat-marketplace-7vzgv\" (UID: \"d37845a1-60c4-4708-b671-42d20f6a9b34\") " pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.397369 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2gth\" (UniqueName: \"kubernetes.io/projected/d37845a1-60c4-4708-b671-42d20f6a9b34-kube-api-access-r2gth\") pod \"redhat-marketplace-7vzgv\" (UID: \"d37845a1-60c4-4708-b671-42d20f6a9b34\") " pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.397391 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d37845a1-60c4-4708-b671-42d20f6a9b34-utilities\") pod \"redhat-marketplace-7vzgv\" (UID: \"d37845a1-60c4-4708-b671-42d20f6a9b34\") " pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.498229 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2gth\" (UniqueName: \"kubernetes.io/projected/d37845a1-60c4-4708-b671-42d20f6a9b34-kube-api-access-r2gth\") pod \"redhat-marketplace-7vzgv\" (UID: \"d37845a1-60c4-4708-b671-42d20f6a9b34\") " pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.498277 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d37845a1-60c4-4708-b671-42d20f6a9b34-utilities\") pod \"redhat-marketplace-7vzgv\" (UID: \"d37845a1-60c4-4708-b671-42d20f6a9b34\") " pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.498374 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d37845a1-60c4-4708-b671-42d20f6a9b34-catalog-content\") pod \"redhat-marketplace-7vzgv\" (UID: \"d37845a1-60c4-4708-b671-42d20f6a9b34\") " pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.499283 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d37845a1-60c4-4708-b671-42d20f6a9b34-catalog-content\") pod \"redhat-marketplace-7vzgv\" (UID: \"d37845a1-60c4-4708-b671-42d20f6a9b34\") " pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.499859 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d37845a1-60c4-4708-b671-42d20f6a9b34-utilities\") pod \"redhat-marketplace-7vzgv\" (UID: \"d37845a1-60c4-4708-b671-42d20f6a9b34\") " pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.521925 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2gth\" (UniqueName: \"kubernetes.io/projected/d37845a1-60c4-4708-b671-42d20f6a9b34-kube-api-access-r2gth\") pod 
\"redhat-marketplace-7vzgv\" (UID: \"d37845a1-60c4-4708-b671-42d20f6a9b34\") " pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.542654 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:52 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:52 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:52 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.542990 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.599646 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.600333 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l7rfh"] Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.601322 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l7rfh"] Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.601435 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.652812 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rcwhk"] Feb 23 09:09:52 crc kubenswrapper[4834]: W0223 09:09:52.662626 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod022f3b26_adfd_4fb6_b5c8_4d363e57dc71.slice/crio-dae63572e0de722f45a5664c39af71d29acd7c2ad65d4ab4cc65bc49233eeb7b WatchSource:0}: Error finding container dae63572e0de722f45a5664c39af71d29acd7c2ad65d4ab4cc65bc49233eeb7b: Status 404 returned error can't find the container with id dae63572e0de722f45a5664c39af71d29acd7c2ad65d4ab4cc65bc49233eeb7b Feb 23 09:09:52 crc kubenswrapper[4834]: E0223 09:09:52.676471 4834 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" cmd=["/bin/bash","-c","test -f /ready/ready"] Feb 23 09:09:52 crc kubenswrapper[4834]: E0223 09:09:52.677992 4834 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" cmd=["/bin/bash","-c","test -f /ready/ready"] Feb 23 09:09:52 crc kubenswrapper[4834]: E0223 09:09:52.679579 4834 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" 
containerID="95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" cmd=["/bin/bash","-c","test -f /ready/ready"] Feb 23 09:09:52 crc kubenswrapper[4834]: E0223 09:09:52.679693 4834 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" podUID="441917a1-296e-4529-a79f-458faf4769e6" containerName="kube-multus-additional-cni-plugins" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.702080 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c092b1a-4742-4813-9558-2f5b6ca34024-catalog-content\") pod \"redhat-marketplace-l7rfh\" (UID: \"2c092b1a-4742-4813-9558-2f5b6ca34024\") " pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.702157 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sscdv\" (UniqueName: \"kubernetes.io/projected/2c092b1a-4742-4813-9558-2f5b6ca34024-kube-api-access-sscdv\") pod \"redhat-marketplace-l7rfh\" (UID: \"2c092b1a-4742-4813-9558-2f5b6ca34024\") " pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.702217 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c092b1a-4742-4813-9558-2f5b6ca34024-utilities\") pod \"redhat-marketplace-l7rfh\" (UID: \"2c092b1a-4742-4813-9558-2f5b6ca34024\") " pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.754449 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.804283 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c092b1a-4742-4813-9558-2f5b6ca34024-catalog-content\") pod \"redhat-marketplace-l7rfh\" (UID: \"2c092b1a-4742-4813-9558-2f5b6ca34024\") " pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.804454 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sscdv\" (UniqueName: \"kubernetes.io/projected/2c092b1a-4742-4813-9558-2f5b6ca34024-kube-api-access-sscdv\") pod \"redhat-marketplace-l7rfh\" (UID: \"2c092b1a-4742-4813-9558-2f5b6ca34024\") " pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.804541 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c092b1a-4742-4813-9558-2f5b6ca34024-utilities\") pod \"redhat-marketplace-l7rfh\" (UID: \"2c092b1a-4742-4813-9558-2f5b6ca34024\") " pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.804803 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c092b1a-4742-4813-9558-2f5b6ca34024-catalog-content\") pod \"redhat-marketplace-l7rfh\" (UID: \"2c092b1a-4742-4813-9558-2f5b6ca34024\") " pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.805107 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c092b1a-4742-4813-9558-2f5b6ca34024-utilities\") pod \"redhat-marketplace-l7rfh\" (UID: \"2c092b1a-4742-4813-9558-2f5b6ca34024\") " pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.827809 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sscdv\" (UniqueName: \"kubernetes.io/projected/2c092b1a-4742-4813-9558-2f5b6ca34024-kube-api-access-sscdv\") pod \"redhat-marketplace-l7rfh\" (UID: \"2c092b1a-4742-4813-9558-2f5b6ca34024\") " pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:09:52 crc kubenswrapper[4834]: I0223 09:09:52.919449 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.023309 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" event={"ID":"022f3b26-adfd-4fb6-b5c8-4d363e57dc71","Type":"ContainerStarted","Data":"2e57c13d5fae95e69029148b8ab0761abae21c1ff8d2d4db18dbfb5a47ed88b2"} Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.024241 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.024262 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" event={"ID":"022f3b26-adfd-4fb6-b5c8-4d363e57dc71","Type":"ContainerStarted","Data":"dae63572e0de722f45a5664c39af71d29acd7c2ad65d4ab4cc65bc49233eeb7b"} Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.026462 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-qrhqs" event={"ID":"83a0c203-a9b8-462a-ba20-536641aa0721","Type":"ContainerStarted","Data":"23c3d48cce66eaa27d523c7ad65b7165d7414738a935f1b7d2493eac4649c1a4"} Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.028842 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"1e26514e-012d-41fc-b0a3-b7683bcf7b61","Type":"ContainerStarted","Data":"f1f0ea620046828de2f83832ffc6a2afe621066e2dec0ce220d5f8da6db07954"} Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.028869 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"1e26514e-012d-41fc-b0a3-b7683bcf7b61","Type":"ContainerStarted","Data":"09e6858f9859e7d3e3ddb2d29222a1f0b3e537295611f43ae15eb5dc4e364f1f"} Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.032160 4834 generic.go:334] "Generic (PLEG): container finished" podID="4ad50479-c17b-4e80-b57a-ef039e81c612" containerID="db7a6d1c9c649dd07afcb602b61c59c0acbf77f6f74386a72e8e38f881c2826f" exitCode=0 Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.032305 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kk9xp" event={"ID":"4ad50479-c17b-4e80-b57a-ef039e81c612","Type":"ContainerDied","Data":"db7a6d1c9c649dd07afcb602b61c59c0acbf77f6f74386a72e8e38f881c2826f"} Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.035961 4834 generic.go:334] "Generic (PLEG): container finished" podID="f51b8d1c-1783-401f-b9b7-aef3b5bbab05" containerID="73b485a117810e038a9da11125cfe3aa8869a0c752f78d7b7fe919f4966f236f" exitCode=0 Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.036059 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2qm7f" event={"ID":"f51b8d1c-1783-401f-b9b7-aef3b5bbab05","Type":"ContainerDied","Data":"73b485a117810e038a9da11125cfe3aa8869a0c752f78d7b7fe919f4966f236f"} Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.062560 4834 generic.go:334] "Generic (PLEG): container finished" podID="67be3aab-67ec-42d2-9158-efe9b6ee13e7" containerID="2c0edafdc67967d0d5408e6e7a9de9a96d79addef6cee56cad0a9348fa8583a2" exitCode=0 Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.062622 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jw46p" 
event={"ID":"67be3aab-67ec-42d2-9158-efe9b6ee13e7","Type":"ContainerDied","Data":"2c0edafdc67967d0d5408e6e7a9de9a96d79addef6cee56cad0a9348fa8583a2"} Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.074790 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" podStartSLOduration=62.074757738 podStartE2EDuration="1m2.074757738s" podCreationTimestamp="2026-02-23 09:08:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:53.064364941 +0000 UTC m=+129.142679328" watchObservedRunningTime="2026-02-23 09:09:53.074757738 +0000 UTC m=+129.153072125" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.133362 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=2.133338192 podStartE2EDuration="2.133338192s" podCreationTimestamp="2026-02-23 09:09:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:53.10513213 +0000 UTC m=+129.183446517" watchObservedRunningTime="2026-02-23 09:09:53.133338192 +0000 UTC m=+129.211652579" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.172775 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4qbwp"] Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.173980 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.177683 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.195957 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4qbwp"] Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.207070 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7vzgv"] Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.337678 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-utilities\") pod \"redhat-operators-4qbwp\" (UID: \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\") " pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.338034 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-catalog-content\") pod \"redhat-operators-4qbwp\" (UID: \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\") " pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.338066 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-524z2\" (UniqueName: \"kubernetes.io/projected/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-kube-api-access-524z2\") pod \"redhat-operators-4qbwp\" (UID: \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\") " pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.427233 4834 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.429606 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.429673 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.431618 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.431662 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.440828 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-utilities\") pod \"redhat-operators-4qbwp\" (UID: \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\") " pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.440928 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-catalog-content\") pod \"redhat-operators-4qbwp\" (UID: \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\") " pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.440966 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-524z2\" (UniqueName: \"kubernetes.io/projected/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-kube-api-access-524z2\") pod \"redhat-operators-4qbwp\" (UID: \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\") " pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.441778 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-utilities\") pod \"redhat-operators-4qbwp\" (UID: \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\") " pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.441999 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-catalog-content\") pod \"redhat-operators-4qbwp\" (UID: \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\") " pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.470817 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l7rfh"] Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.474176 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-524z2\" (UniqueName: \"kubernetes.io/projected/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-kube-api-access-524z2\") pod \"redhat-operators-4qbwp\" (UID: \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\") " pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.502463 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.541240 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:53 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:53 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:53 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.541353 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.541957 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ce89363-0385-45bf-b650-8ea76d9d9085-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3ce89363-0385-45bf-b650-8ea76d9d9085\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.542258 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ce89363-0385-45bf-b650-8ea76d9d9085-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3ce89363-0385-45bf-b650-8ea76d9d9085\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.592797 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4mxvd"] Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.593922 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.604354 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4mxvd"] Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.643125 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ce89363-0385-45bf-b650-8ea76d9d9085-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3ce89363-0385-45bf-b650-8ea76d9d9085\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.643282 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ce89363-0385-45bf-b650-8ea76d9d9085-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3ce89363-0385-45bf-b650-8ea76d9d9085\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.643377 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ce89363-0385-45bf-b650-8ea76d9d9085-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"3ce89363-0385-45bf-b650-8ea76d9d9085\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.666460 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ce89363-0385-45bf-b650-8ea76d9d9085-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"3ce89363-0385-45bf-b650-8ea76d9d9085\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.744993 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzx7h\" (UniqueName: \"kubernetes.io/projected/07261104-4428-40bf-b7f6-10319fa3ba42-kube-api-access-wzx7h\") pod \"redhat-operators-4mxvd\" (UID: \"07261104-4428-40bf-b7f6-10319fa3ba42\") " pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.745050 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07261104-4428-40bf-b7f6-10319fa3ba42-catalog-content\") pod \"redhat-operators-4mxvd\" (UID: \"07261104-4428-40bf-b7f6-10319fa3ba42\") " pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.745106 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07261104-4428-40bf-b7f6-10319fa3ba42-utilities\") pod \"redhat-operators-4mxvd\" (UID: \"07261104-4428-40bf-b7f6-10319fa3ba42\") " pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.756030 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4qbwp"] Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.761655 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 23 09:09:53 crc kubenswrapper[4834]: W0223 09:09:53.801355 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf20021a4_12a6_49ae_a85f_cc8bdb6051d3.slice/crio-758e74389d590b701fddef7fcaa84b2ac1dbcf59ec72a7d05997a851bf7e1b84 WatchSource:0}: Error finding container 758e74389d590b701fddef7fcaa84b2ac1dbcf59ec72a7d05997a851bf7e1b84: Status 404 returned error can't find the container with id 758e74389d590b701fddef7fcaa84b2ac1dbcf59ec72a7d05997a851bf7e1b84 Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.846511 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzx7h\" (UniqueName: \"kubernetes.io/projected/07261104-4428-40bf-b7f6-10319fa3ba42-kube-api-access-wzx7h\") pod \"redhat-operators-4mxvd\" (UID: \"07261104-4428-40bf-b7f6-10319fa3ba42\") " pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.846778 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07261104-4428-40bf-b7f6-10319fa3ba42-catalog-content\") pod \"redhat-operators-4mxvd\" (UID: \"07261104-4428-40bf-b7f6-10319fa3ba42\") " pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.847175 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07261104-4428-40bf-b7f6-10319fa3ba42-catalog-content\") pod \"redhat-operators-4mxvd\" (UID: \"07261104-4428-40bf-b7f6-10319fa3ba42\") " pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.847250 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07261104-4428-40bf-b7f6-10319fa3ba42-utilities\") pod \"redhat-operators-4mxvd\" (UID: \"07261104-4428-40bf-b7f6-10319fa3ba42\") " pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.847490 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07261104-4428-40bf-b7f6-10319fa3ba42-utilities\") pod \"redhat-operators-4mxvd\" (UID: \"07261104-4428-40bf-b7f6-10319fa3ba42\") " pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.869345 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzx7h\" (UniqueName: \"kubernetes.io/projected/07261104-4428-40bf-b7f6-10319fa3ba42-kube-api-access-wzx7h\") pod \"redhat-operators-4mxvd\" (UID: \"07261104-4428-40bf-b7f6-10319fa3ba42\") " pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:09:53 crc kubenswrapper[4834]: I0223 09:09:53.970652 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:09:54 crc kubenswrapper[4834]: I0223 09:09:54.017181 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Feb 23 09:09:54 crc kubenswrapper[4834]: W0223 09:09:54.058375 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod3ce89363_0385_45bf_b650_8ea76d9d9085.slice/crio-95a6d8ad64a15419fe8513822b85c799a164aee5550ccbd45eae4ce5bf6c1b6a WatchSource:0}: Error finding container 95a6d8ad64a15419fe8513822b85c799a164aee5550ccbd45eae4ce5bf6c1b6a: Status 404 returned error can't find the container with id 95a6d8ad64a15419fe8513822b85c799a164aee5550ccbd45eae4ce5bf6c1b6a Feb 23 09:09:54 crc kubenswrapper[4834]: I0223 09:09:54.139002 4834 generic.go:334] "Generic (PLEG): container finished" podID="2c092b1a-4742-4813-9558-2f5b6ca34024" containerID="5e2d0c921537e32c58f758bbdab57848d1b66f6ef66c7cc17c843299f274bff3" exitCode=0 Feb 23 09:09:54 crc kubenswrapper[4834]: I0223 09:09:54.139110 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l7rfh" event={"ID":"2c092b1a-4742-4813-9558-2f5b6ca34024","Type":"ContainerDied","Data":"5e2d0c921537e32c58f758bbdab57848d1b66f6ef66c7cc17c843299f274bff3"} Feb 23 09:09:54 crc kubenswrapper[4834]: I0223 09:09:54.139170 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l7rfh" event={"ID":"2c092b1a-4742-4813-9558-2f5b6ca34024","Type":"ContainerStarted","Data":"6fd5c1571e0efbb9c7b8c3ca47e37a6b3f923dee98ca02f27df51e12a1eed6e0"} Feb 23 09:09:54 crc kubenswrapper[4834]: I0223 09:09:54.150070 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4qbwp" event={"ID":"f20021a4-12a6-49ae-a85f-cc8bdb6051d3","Type":"ContainerStarted","Data":"e33267c681bcbf3330e8922779bb01e3a5114c288c6c51bf2df4630abdffc0c9"} Feb 23 09:09:54 crc kubenswrapper[4834]: I0223 09:09:54.150126 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4qbwp" event={"ID":"f20021a4-12a6-49ae-a85f-cc8bdb6051d3","Type":"ContainerStarted","Data":"758e74389d590b701fddef7fcaa84b2ac1dbcf59ec72a7d05997a851bf7e1b84"} Feb 23 09:09:54 crc kubenswrapper[4834]: I0223 09:09:54.166941 4834 generic.go:334] "Generic (PLEG): container finished" podID="d37845a1-60c4-4708-b671-42d20f6a9b34" containerID="efd20e7f992157fee6a513b7e47e3b9ff42b4e522e0196958f78e6171b1bb5ff" exitCode=0 Feb 23 09:09:54 crc kubenswrapper[4834]: I0223 09:09:54.167298 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7vzgv" event={"ID":"d37845a1-60c4-4708-b671-42d20f6a9b34","Type":"ContainerDied","Data":"efd20e7f992157fee6a513b7e47e3b9ff42b4e522e0196958f78e6171b1bb5ff"} Feb 23 09:09:54 crc kubenswrapper[4834]: I0223 09:09:54.167361 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7vzgv" event={"ID":"d37845a1-60c4-4708-b671-42d20f6a9b34","Type":"ContainerStarted","Data":"fdd610d1623bf6c8298088923f3df8090407da9490202eb45872da2bf8274071"} Feb 23 09:09:54 crc kubenswrapper[4834]: I0223 09:09:54.172066 4834 generic.go:334] "Generic (PLEG): container finished" podID="1e26514e-012d-41fc-b0a3-b7683bcf7b61" containerID="f1f0ea620046828de2f83832ffc6a2afe621066e2dec0ce220d5f8da6db07954" exitCode=0 Feb 23 09:09:54 crc kubenswrapper[4834]: I0223 09:09:54.172188 4834 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"1e26514e-012d-41fc-b0a3-b7683bcf7b61","Type":"ContainerDied","Data":"f1f0ea620046828de2f83832ffc6a2afe621066e2dec0ce220d5f8da6db07954"} Feb 23 09:09:54 crc kubenswrapper[4834]: I0223 09:09:54.492559 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4mxvd"] Feb 23 09:09:54 crc kubenswrapper[4834]: I0223 09:09:54.541801 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:54 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:54 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:54 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:54 crc kubenswrapper[4834]: I0223 09:09:54.541880 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.185764 4834 generic.go:334] "Generic (PLEG): container finished" podID="f20021a4-12a6-49ae-a85f-cc8bdb6051d3" containerID="e33267c681bcbf3330e8922779bb01e3a5114c288c6c51bf2df4630abdffc0c9" exitCode=0 Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.185863 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4qbwp" event={"ID":"f20021a4-12a6-49ae-a85f-cc8bdb6051d3","Type":"ContainerDied","Data":"e33267c681bcbf3330e8922779bb01e3a5114c288c6c51bf2df4630abdffc0c9"} Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.207799 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3ce89363-0385-45bf-b650-8ea76d9d9085","Type":"ContainerStarted","Data":"c71109c1f716d0cd760d3bfa479acd8a3e153a78bfd1edbb8fc5cc0d189bd3a9"} Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.207882 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3ce89363-0385-45bf-b650-8ea76d9d9085","Type":"ContainerStarted","Data":"95a6d8ad64a15419fe8513822b85c799a164aee5550ccbd45eae4ce5bf6c1b6a"} Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.234843 4834 generic.go:334] "Generic (PLEG): container finished" podID="07261104-4428-40bf-b7f6-10319fa3ba42" containerID="277c46fb56097c4601efc79e71889d5e4dee68e9ba0349169c240f4d1dfc7a41" exitCode=0 Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.235934 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4mxvd" event={"ID":"07261104-4428-40bf-b7f6-10319fa3ba42","Type":"ContainerDied","Data":"277c46fb56097c4601efc79e71889d5e4dee68e9ba0349169c240f4d1dfc7a41"} Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.235989 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4mxvd" event={"ID":"07261104-4428-40bf-b7f6-10319fa3ba42","Type":"ContainerStarted","Data":"6661c1b803ad3bd67b4d3faeb2f86851e669c7e9cdc0ee07c960eb431c3375fa"} Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.257257 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" 
podStartSLOduration=2.2572366649999998 podStartE2EDuration="2.257236665s" podCreationTimestamp="2026-02-23 09:09:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:09:55.25631297 +0000 UTC m=+131.334627367" watchObservedRunningTime="2026-02-23 09:09:55.257236665 +0000 UTC m=+131.335551052" Feb 23 09:09:55 crc kubenswrapper[4834]: E0223 09:09:55.402336 4834 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-pod3ce89363_0385_45bf_b650_8ea76d9d9085.slice/crio-conmon-c71109c1f716d0cd760d3bfa479acd8a3e153a78bfd1edbb8fc5cc0d189bd3a9.scope\": RecentStats: unable to find data in memory cache]" Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.560588 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:55 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:55 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:55 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.560666 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.641207 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.752899 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.758857 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-897nm" Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.794924 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1e26514e-012d-41fc-b0a3-b7683bcf7b61-kube-api-access\") pod \"1e26514e-012d-41fc-b0a3-b7683bcf7b61\" (UID: \"1e26514e-012d-41fc-b0a3-b7683bcf7b61\") " Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.795013 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1e26514e-012d-41fc-b0a3-b7683bcf7b61-kubelet-dir\") pod \"1e26514e-012d-41fc-b0a3-b7683bcf7b61\" (UID: \"1e26514e-012d-41fc-b0a3-b7683bcf7b61\") " Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.795617 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/1e26514e-012d-41fc-b0a3-b7683bcf7b61-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "1e26514e-012d-41fc-b0a3-b7683bcf7b61" (UID: "1e26514e-012d-41fc-b0a3-b7683bcf7b61"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.846551 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1e26514e-012d-41fc-b0a3-b7683bcf7b61-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1e26514e-012d-41fc-b0a3-b7683bcf7b61" (UID: "1e26514e-012d-41fc-b0a3-b7683bcf7b61"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.901374 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1e26514e-012d-41fc-b0a3-b7683bcf7b61-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:55 crc kubenswrapper[4834]: I0223 09:09:55.901448 4834 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/1e26514e-012d-41fc-b0a3-b7683bcf7b61-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:56 crc kubenswrapper[4834]: I0223 09:09:56.244144 4834 generic.go:334] "Generic (PLEG): container finished" podID="3ce89363-0385-45bf-b650-8ea76d9d9085" containerID="c71109c1f716d0cd760d3bfa479acd8a3e153a78bfd1edbb8fc5cc0d189bd3a9" exitCode=0 Feb 23 09:09:56 crc kubenswrapper[4834]: I0223 09:09:56.244207 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3ce89363-0385-45bf-b650-8ea76d9d9085","Type":"ContainerDied","Data":"c71109c1f716d0cd760d3bfa479acd8a3e153a78bfd1edbb8fc5cc0d189bd3a9"} Feb 23 09:09:56 crc kubenswrapper[4834]: I0223 09:09:56.246611 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Feb 23 09:09:56 crc kubenswrapper[4834]: I0223 09:09:56.246803 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"1e26514e-012d-41fc-b0a3-b7683bcf7b61","Type":"ContainerDied","Data":"09e6858f9859e7d3e3ddb2d29222a1f0b3e537295611f43ae15eb5dc4e364f1f"} Feb 23 09:09:56 crc kubenswrapper[4834]: I0223 09:09:56.246937 4834 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09e6858f9859e7d3e3ddb2d29222a1f0b3e537295611f43ae15eb5dc4e364f1f" Feb 23 09:09:56 crc kubenswrapper[4834]: I0223 09:09:56.541121 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:56 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:56 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:56 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:56 crc kubenswrapper[4834]: I0223 09:09:56.541182 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:57 crc kubenswrapper[4834]: I0223 09:09:57.407736 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-c9zqb" Feb 23 09:09:57 crc kubenswrapper[4834]: I0223 09:09:57.541947 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: 
Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:57 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:57 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:57 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:57 crc kubenswrapper[4834]: I0223 09:09:57.542348 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:58 crc kubenswrapper[4834]: I0223 09:09:58.363754 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:09:58 crc kubenswrapper[4834]: I0223 09:09:58.496359 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 23 09:09:58 crc kubenswrapper[4834]: I0223 09:09:58.580670 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:58 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:58 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:58 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:58 crc kubenswrapper[4834]: I0223 09:09:58.580758 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:58 crc kubenswrapper[4834]: I0223 09:09:58.596910 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ce89363-0385-45bf-b650-8ea76d9d9085-kube-api-access\") pod \"3ce89363-0385-45bf-b650-8ea76d9d9085\" (UID: \"3ce89363-0385-45bf-b650-8ea76d9d9085\") " Feb 23 09:09:58 crc kubenswrapper[4834]: I0223 09:09:58.597006 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ce89363-0385-45bf-b650-8ea76d9d9085-kubelet-dir\") pod \"3ce89363-0385-45bf-b650-8ea76d9d9085\" (UID: \"3ce89363-0385-45bf-b650-8ea76d9d9085\") " Feb 23 09:09:58 crc kubenswrapper[4834]: I0223 09:09:58.597473 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/3ce89363-0385-45bf-b650-8ea76d9d9085-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "3ce89363-0385-45bf-b650-8ea76d9d9085" (UID: "3ce89363-0385-45bf-b650-8ea76d9d9085"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:09:58 crc kubenswrapper[4834]: I0223 09:09:58.628511 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ce89363-0385-45bf-b650-8ea76d9d9085-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "3ce89363-0385-45bf-b650-8ea76d9d9085" (UID: "3ce89363-0385-45bf-b650-8ea76d9d9085"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:09:58 crc kubenswrapper[4834]: I0223 09:09:58.698800 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/3ce89363-0385-45bf-b650-8ea76d9d9085-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:58 crc kubenswrapper[4834]: I0223 09:09:58.699322 4834 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/3ce89363-0385-45bf-b650-8ea76d9d9085-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 23 09:09:59 crc kubenswrapper[4834]: I0223 09:09:59.312834 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"3ce89363-0385-45bf-b650-8ea76d9d9085","Type":"ContainerDied","Data":"95a6d8ad64a15419fe8513822b85c799a164aee5550ccbd45eae4ce5bf6c1b6a"} Feb 23 09:09:59 crc kubenswrapper[4834]: I0223 09:09:59.312950 4834 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95a6d8ad64a15419fe8513822b85c799a164aee5550ccbd45eae4ce5bf6c1b6a" Feb 23 09:09:59 crc kubenswrapper[4834]: I0223 09:09:59.313154 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Feb 23 09:09:59 crc kubenswrapper[4834]: I0223 09:09:59.542851 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:09:59 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:09:59 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:09:59 crc kubenswrapper[4834]: healthz check failed Feb 23 09:09:59 crc kubenswrapper[4834]: I0223 09:09:59.542930 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:09:59 crc kubenswrapper[4834]: I0223 09:09:59.632807 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Feb 23 09:10:00 crc kubenswrapper[4834]: I0223 09:10:00.471220 4834 patch_prober.go:28] interesting pod/console-f9d7485db-g9jpx container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.29:8443/health\": dial tcp 10.217.0.29:8443: connect: connection refused" start-of-body= Feb 23 09:10:00 crc kubenswrapper[4834]: I0223 09:10:00.471289 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-g9jpx" podUID="24eb6775-1135-4cc7-9e62-103e142f285a" containerName="console" probeResult="failure" output="Get \"https://10.217.0.29:8443/health\": dial tcp 10.217.0.29:8443: connect: connection refused" Feb 23 09:10:00 crc kubenswrapper[4834]: I0223 09:10:00.541363 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:10:00 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:10:00 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:10:00 crc kubenswrapper[4834]: healthz check failed Feb 23 09:10:00 crc 
kubenswrapper[4834]: I0223 09:10:00.541494 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:10:01 crc kubenswrapper[4834]: I0223 09:10:01.483821 4834 patch_prober.go:28] interesting pod/downloads-7954f5f757-mxsss container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 23 09:10:01 crc kubenswrapper[4834]: I0223 09:10:01.483899 4834 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-mxsss" podUID="fbca2b49-c933-408c-9c80-fb1202bfb6f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 23 09:10:01 crc kubenswrapper[4834]: I0223 09:10:01.484112 4834 patch_prober.go:28] interesting pod/downloads-7954f5f757-mxsss container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Feb 23 09:10:01 crc kubenswrapper[4834]: I0223 09:10:01.484184 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-mxsss" podUID="fbca2b49-c933-408c-9c80-fb1202bfb6f1" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Feb 23 09:10:01 crc kubenswrapper[4834]: I0223 09:10:01.542090 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:10:01 crc kubenswrapper[4834]: [-]has-synced failed: reason withheld Feb 23 09:10:01 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:10:01 crc kubenswrapper[4834]: healthz check failed Feb 23 09:10:01 crc kubenswrapper[4834]: I0223 09:10:01.542755 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:10:02 crc kubenswrapper[4834]: I0223 09:10:02.539861 4834 patch_prober.go:28] interesting pod/router-default-5444994796-rf5l7 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Feb 23 09:10:02 crc kubenswrapper[4834]: [+]has-synced ok Feb 23 09:10:02 crc kubenswrapper[4834]: [+]process-running ok Feb 23 09:10:02 crc kubenswrapper[4834]: healthz check failed Feb 23 09:10:02 crc kubenswrapper[4834]: I0223 09:10:02.539932 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-rf5l7" podUID="f489a96f-1839-4986-9340-e9b9d8960435" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Feb 23 09:10:02 crc kubenswrapper[4834]: E0223 09:10:02.679926 4834 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, 
stdout: , stderr: , exit code -1" containerID="95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" cmd=["/bin/bash","-c","test -f /ready/ready"] Feb 23 09:10:02 crc kubenswrapper[4834]: E0223 09:10:02.687350 4834 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" cmd=["/bin/bash","-c","test -f /ready/ready"] Feb 23 09:10:02 crc kubenswrapper[4834]: E0223 09:10:02.710491 4834 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" cmd=["/bin/bash","-c","test -f /ready/ready"] Feb 23 09:10:02 crc kubenswrapper[4834]: E0223 09:10:02.710590 4834 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" podUID="441917a1-296e-4529-a79f-458faf4769e6" containerName="kube-multus-additional-cni-plugins" Feb 23 09:10:03 crc kubenswrapper[4834]: I0223 09:10:03.543136 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:10:03 crc kubenswrapper[4834]: I0223 09:10:03.545620 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-rf5l7" Feb 23 09:10:03 crc kubenswrapper[4834]: I0223 09:10:03.595803 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=4.595779866 podStartE2EDuration="4.595779866s" podCreationTimestamp="2026-02-23 09:09:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:10:03.575427439 +0000 UTC m=+139.653741826" watchObservedRunningTime="2026-02-23 09:10:03.595779866 +0000 UTC m=+139.674094253" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.542551 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.543076 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.543129 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") 
" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.543149 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.546209 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.546981 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.553076 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.554712 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.556798 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.564269 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.567222 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.570719 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.602325 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.612535 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Feb 23 09:10:07 crc kubenswrapper[4834]: I0223 09:10:07.623227 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:10:10 crc kubenswrapper[4834]: I0223 09:10:10.489498 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:10:10 crc kubenswrapper[4834]: I0223 09:10:10.493355 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-g9jpx" Feb 23 09:10:11 crc kubenswrapper[4834]: I0223 09:10:11.488537 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-mxsss" Feb 23 09:10:12 crc kubenswrapper[4834]: I0223 09:10:12.366271 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:10:12 crc kubenswrapper[4834]: E0223 09:10:12.675813 4834 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" cmd=["/bin/bash","-c","test -f /ready/ready"] Feb 23 09:10:12 crc kubenswrapper[4834]: E0223 09:10:12.677414 4834 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" cmd=["/bin/bash","-c","test -f /ready/ready"] Feb 23 09:10:12 crc kubenswrapper[4834]: E0223 09:10:12.679370 4834 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" cmd=["/bin/bash","-c","test -f /ready/ready"] Feb 23 09:10:12 crc kubenswrapper[4834]: E0223 09:10:12.679466 4834 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" podUID="441917a1-296e-4529-a79f-458faf4769e6" containerName="kube-multus-additional-cni-plugins" Feb 23 09:10:20 crc kubenswrapper[4834]: I0223 09:10:20.521245 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-rlmdc_441917a1-296e-4529-a79f-458faf4769e6/kube-multus-additional-cni-plugins/0.log" Feb 23 09:10:20 crc kubenswrapper[4834]: I0223 09:10:20.521968 4834 generic.go:334] "Generic (PLEG): container finished" podID="441917a1-296e-4529-a79f-458faf4769e6" containerID="95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" exitCode=137 Feb 23 09:10:20 crc kubenswrapper[4834]: I0223 09:10:20.522016 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" event={"ID":"441917a1-296e-4529-a79f-458faf4769e6","Type":"ContainerDied","Data":"95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d"} Feb 23 09:10:20 crc kubenswrapper[4834]: I0223 09:10:20.598122 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Feb 23 09:10:21 crc kubenswrapper[4834]: E0223 09:10:21.264836 4834 log.go:32] "PullImage from image service failed" err="rpc 
error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Feb 23 09:10:21 crc kubenswrapper[4834]: E0223 09:10:21.265320 4834 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-7l2f5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-5vvxt_openshift-marketplace(19479380-b603-400a-99e9-6b8186f42f33): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 23 09:10:21 crc kubenswrapper[4834]: E0223 09:10:21.266453 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-5vvxt" podUID="19479380-b603-400a-99e9-6b8186f42f33" Feb 23 09:10:22 crc kubenswrapper[4834]: I0223 09:10:22.361151 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-vdn4d" Feb 23 09:10:22 crc kubenswrapper[4834]: I0223 09:10:22.380012 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=2.379988388 podStartE2EDuration="2.379988388s" podCreationTimestamp="2026-02-23 09:10:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:10:21.576850274 +0000 UTC m=+157.655164681" watchObservedRunningTime="2026-02-23 09:10:22.379988388 +0000 UTC m=+158.458302775" Feb 23 09:10:22 crc kubenswrapper[4834]: E0223 09:10:22.448281 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image 
\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-5vvxt" podUID="19479380-b603-400a-99e9-6b8186f42f33" Feb 23 09:10:22 crc kubenswrapper[4834]: E0223 09:10:22.674102 4834 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d is running failed: container process not found" containerID="95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" cmd=["/bin/bash","-c","test -f /ready/ready"] Feb 23 09:10:22 crc kubenswrapper[4834]: E0223 09:10:22.674854 4834 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d is running failed: container process not found" containerID="95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" cmd=["/bin/bash","-c","test -f /ready/ready"] Feb 23 09:10:22 crc kubenswrapper[4834]: E0223 09:10:22.675177 4834 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d is running failed: container process not found" containerID="95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" cmd=["/bin/bash","-c","test -f /ready/ready"] Feb 23 09:10:22 crc kubenswrapper[4834]: E0223 09:10:22.675215 4834 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d is running failed: container process not found" probeType="Readiness" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" podUID="441917a1-296e-4529-a79f-458faf4769e6" containerName="kube-multus-additional-cni-plugins" Feb 23 09:10:26 crc kubenswrapper[4834]: I0223 09:10:26.798660 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 23 09:10:26 crc kubenswrapper[4834]: E0223 09:10:26.799655 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1e26514e-012d-41fc-b0a3-b7683bcf7b61" containerName="pruner" Feb 23 09:10:26 crc kubenswrapper[4834]: I0223 09:10:26.799669 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="1e26514e-012d-41fc-b0a3-b7683bcf7b61" containerName="pruner" Feb 23 09:10:26 crc kubenswrapper[4834]: E0223 09:10:26.799682 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ce89363-0385-45bf-b650-8ea76d9d9085" containerName="pruner" Feb 23 09:10:26 crc kubenswrapper[4834]: I0223 09:10:26.799688 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ce89363-0385-45bf-b650-8ea76d9d9085" containerName="pruner" Feb 23 09:10:26 crc kubenswrapper[4834]: I0223 09:10:26.799927 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ce89363-0385-45bf-b650-8ea76d9d9085" containerName="pruner" Feb 23 09:10:26 crc kubenswrapper[4834]: I0223 09:10:26.799945 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="1e26514e-012d-41fc-b0a3-b7683bcf7b61" containerName="pruner" Feb 23 09:10:26 crc kubenswrapper[4834]: I0223 09:10:26.800361 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 23 09:10:26 crc kubenswrapper[4834]: I0223 09:10:26.802814 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Feb 23 09:10:26 crc kubenswrapper[4834]: I0223 09:10:26.804918 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Feb 23 09:10:26 crc kubenswrapper[4834]: I0223 09:10:26.805926 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 23 09:10:26 crc kubenswrapper[4834]: I0223 09:10:26.922327 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b7b400ca-5d3b-41a1-bd67-e4f8e7fca799-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"b7b400ca-5d3b-41a1-bd67-e4f8e7fca799\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 23 09:10:26 crc kubenswrapper[4834]: I0223 09:10:26.922415 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b7b400ca-5d3b-41a1-bd67-e4f8e7fca799-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"b7b400ca-5d3b-41a1-bd67-e4f8e7fca799\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.024128 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b7b400ca-5d3b-41a1-bd67-e4f8e7fca799-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"b7b400ca-5d3b-41a1-bd67-e4f8e7fca799\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.024254 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b7b400ca-5d3b-41a1-bd67-e4f8e7fca799-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"b7b400ca-5d3b-41a1-bd67-e4f8e7fca799\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.024595 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b7b400ca-5d3b-41a1-bd67-e4f8e7fca799-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"b7b400ca-5d3b-41a1-bd67-e4f8e7fca799\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.045920 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b7b400ca-5d3b-41a1-bd67-e4f8e7fca799-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"b7b400ca-5d3b-41a1-bd67-e4f8e7fca799\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.135943 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.605674 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-rlmdc_441917a1-296e-4529-a79f-458faf4769e6/kube-multus-additional-cni-plugins/0.log" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.605750 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:10:27 crc kubenswrapper[4834]: E0223 09:10:27.640702 4834 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Feb 23 09:10:27 crc kubenswrapper[4834]: E0223 09:10:27.641809 4834 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-89srs,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-jw46p_openshift-marketplace(67be3aab-67ec-42d2-9158-efe9b6ee13e7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 23 09:10:27 crc kubenswrapper[4834]: E0223 09:10:27.643070 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-jw46p" podUID="67be3aab-67ec-42d2-9158-efe9b6ee13e7" Feb 23 09:10:27 crc kubenswrapper[4834]: E0223 09:10:27.670444 4834 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Feb 23 09:10:27 crc kubenswrapper[4834]: E0223 09:10:27.670636 4834 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ktfgk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-kk9xp_openshift-marketplace(4ad50479-c17b-4e80-b57a-ef039e81c612): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Feb 23 09:10:27 crc kubenswrapper[4834]: E0223 09:10:27.671825 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-kk9xp" podUID="4ad50479-c17b-4e80-b57a-ef039e81c612" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.734946 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-btxmv\" (UniqueName: \"kubernetes.io/projected/441917a1-296e-4529-a79f-458faf4769e6-kube-api-access-btxmv\") pod \"441917a1-296e-4529-a79f-458faf4769e6\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.735467 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/441917a1-296e-4529-a79f-458faf4769e6-cni-sysctl-allowlist\") pod \"441917a1-296e-4529-a79f-458faf4769e6\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.735521 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/441917a1-296e-4529-a79f-458faf4769e6-tuning-conf-dir\") pod \"441917a1-296e-4529-a79f-458faf4769e6\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.735568 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/441917a1-296e-4529-a79f-458faf4769e6-ready\") pod \"441917a1-296e-4529-a79f-458faf4769e6\" (UID: \"441917a1-296e-4529-a79f-458faf4769e6\") " Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.737026 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/441917a1-296e-4529-a79f-458faf4769e6-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "441917a1-296e-4529-a79f-458faf4769e6" (UID: "441917a1-296e-4529-a79f-458faf4769e6"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.737115 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/441917a1-296e-4529-a79f-458faf4769e6-tuning-conf-dir" (OuterVolumeSpecName: "tuning-conf-dir") pod "441917a1-296e-4529-a79f-458faf4769e6" (UID: "441917a1-296e-4529-a79f-458faf4769e6"). InnerVolumeSpecName "tuning-conf-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.737410 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/441917a1-296e-4529-a79f-458faf4769e6-ready" (OuterVolumeSpecName: "ready") pod "441917a1-296e-4529-a79f-458faf4769e6" (UID: "441917a1-296e-4529-a79f-458faf4769e6"). InnerVolumeSpecName "ready". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.746526 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/441917a1-296e-4529-a79f-458faf4769e6-kube-api-access-btxmv" (OuterVolumeSpecName: "kube-api-access-btxmv") pod "441917a1-296e-4529-a79f-458faf4769e6" (UID: "441917a1-296e-4529-a79f-458faf4769e6"). InnerVolumeSpecName "kube-api-access-btxmv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.837127 4834 reconciler_common.go:293] "Volume detached for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/441917a1-296e-4529-a79f-458faf4769e6-tuning-conf-dir\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.837169 4834 reconciler_common.go:293] "Volume detached for volume \"ready\" (UniqueName: \"kubernetes.io/empty-dir/441917a1-296e-4529-a79f-458faf4769e6-ready\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.837178 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-btxmv\" (UniqueName: \"kubernetes.io/projected/441917a1-296e-4529-a79f-458faf4769e6-kube-api-access-btxmv\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:27 crc kubenswrapper[4834]: I0223 09:10:27.837191 4834 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/441917a1-296e-4529-a79f-458faf4769e6-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:28 crc kubenswrapper[4834]: W0223 09:10:28.086149 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-639c94742c34eb5bc9a716f421c6b6ca9c39b7b32faff478612ca763199d252b WatchSource:0}: Error finding container 639c94742c34eb5bc9a716f421c6b6ca9c39b7b32faff478612ca763199d252b: Status 404 returned error can't find the container with id 639c94742c34eb5bc9a716f421c6b6ca9c39b7b32faff478612ca763199d252b Feb 23 09:10:28 crc kubenswrapper[4834]: W0223 09:10:28.131352 4834 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-ba6b1916ddcf07792b55d806174d06a3bf18e81a816b38e669f920c130a0731e WatchSource:0}: Error finding container ba6b1916ddcf07792b55d806174d06a3bf18e81a816b38e669f920c130a0731e: Status 404 returned error can't find the container with id ba6b1916ddcf07792b55d806174d06a3bf18e81a816b38e669f920c130a0731e Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.242231 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Feb 23 09:10:28 crc kubenswrapper[4834]: W0223 09:10:28.282762 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podb7b400ca_5d3b_41a1_bd67_e4f8e7fca799.slice/crio-fb27044949127394059ea593f514a824e3cde4584c1e8d6d79db17951050c09a WatchSource:0}: Error finding container fb27044949127394059ea593f514a824e3cde4584c1e8d6d79db17951050c09a: Status 404 returned error can't find the container with id fb27044949127394059ea593f514a824e3cde4584c1e8d6d79db17951050c09a Feb 23 09:10:28 crc kubenswrapper[4834]: W0223 09:10:28.283328 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-fdc692b138aa0e680865b6ba8e7282dc01704826d42f1e85c893a73a634ba781 WatchSource:0}: Error finding container fdc692b138aa0e680865b6ba8e7282dc01704826d42f1e85c893a73a634ba781: Status 404 returned error can't find the container with id fdc692b138aa0e680865b6ba8e7282dc01704826d42f1e85c893a73a634ba781 Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.570612 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"b7b400ca-5d3b-41a1-bd67-e4f8e7fca799","Type":"ContainerStarted","Data":"fb27044949127394059ea593f514a824e3cde4584c1e8d6d79db17951050c09a"} Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.573107 4834 generic.go:334] "Generic (PLEG): container finished" podID="2c092b1a-4742-4813-9558-2f5b6ca34024" containerID="a305c850830ffcb63fa2c6d557c480fdca57de0a1559f8dc6331ad8d4380237f" exitCode=0 Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.573229 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l7rfh" event={"ID":"2c092b1a-4742-4813-9558-2f5b6ca34024","Type":"ContainerDied","Data":"a305c850830ffcb63fa2c6d557c480fdca57de0a1559f8dc6331ad8d4380237f"} Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.575485 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"b357271865a948058c566d53c71fa412abc53a98b63a0f3deb689dd73124947a"} Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.575537 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"639c94742c34eb5bc9a716f421c6b6ca9c39b7b32faff478612ca763199d252b"} Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.579263 4834 generic.go:334] "Generic (PLEG): container finished" podID="f51b8d1c-1783-401f-b9b7-aef3b5bbab05" containerID="233fefb2254ec75643ea01951263eb1e2e6aa53cb26e65fcb3acbd16ce251ea9" exitCode=0 Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.579330 4834 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2qm7f" event={"ID":"f51b8d1c-1783-401f-b9b7-aef3b5bbab05","Type":"ContainerDied","Data":"233fefb2254ec75643ea01951263eb1e2e6aa53cb26e65fcb3acbd16ce251ea9"} Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.581276 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"f0409e0d9f85f6aadbe65f43e5a6dc7ff80980134b4cd6e5fabe0c154a6354d9"} Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.581309 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"ba6b1916ddcf07792b55d806174d06a3bf18e81a816b38e669f920c130a0731e"} Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.581857 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.598786 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4mxvd" event={"ID":"07261104-4428-40bf-b7f6-10319fa3ba42","Type":"ContainerStarted","Data":"d6b856f71c75a9770d9c5434d8106065db73356751b6209ec3341c6ec883f199"} Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.600870 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"0ac21bf234f97ea6b3261a54c1b183b591bf0d33a74d3de05d1eb415827bef22"} Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.601040 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"fdc692b138aa0e680865b6ba8e7282dc01704826d42f1e85c893a73a634ba781"} Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.604181 4834 generic.go:334] "Generic (PLEG): container finished" podID="d37845a1-60c4-4708-b671-42d20f6a9b34" containerID="a023f5182377bcb47e1cd94ddb70c1b2018ff2983c25a95291a28f80d3923df8" exitCode=0 Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.604262 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7vzgv" event={"ID":"d37845a1-60c4-4708-b671-42d20f6a9b34","Type":"ContainerDied","Data":"a023f5182377bcb47e1cd94ddb70c1b2018ff2983c25a95291a28f80d3923df8"} Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.606520 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_cni-sysctl-allowlist-ds-rlmdc_441917a1-296e-4529-a79f-458faf4769e6/kube-multus-additional-cni-plugins/0.log" Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.606630 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.606736 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/cni-sysctl-allowlist-ds-rlmdc" event={"ID":"441917a1-296e-4529-a79f-458faf4769e6","Type":"ContainerDied","Data":"6ead88ab6c73ff9c9ba14cdc18ad59de3aa8e95987d5371642be9808ca0c77d9"} Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.606845 4834 scope.go:117] "RemoveContainer" containerID="95ec29942c140fb247fc3ef2bd3bbf3e1ed417f5378a449fecd0d42c697f0d3d" Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.617628 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4qbwp" event={"ID":"f20021a4-12a6-49ae-a85f-cc8bdb6051d3","Type":"ContainerStarted","Data":"08b2d4eff732882d0905f6e666eef4e9574c187d7b86c62b5a47c4e21ce6fe14"} Feb 23 09:10:28 crc kubenswrapper[4834]: E0223 09:10:28.618565 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-jw46p" podUID="67be3aab-67ec-42d2-9158-efe9b6ee13e7" Feb 23 09:10:28 crc kubenswrapper[4834]: E0223 09:10:28.621096 4834 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-kk9xp" podUID="4ad50479-c17b-4e80-b57a-ef039e81c612" Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.782690 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-rlmdc"] Feb 23 09:10:28 crc kubenswrapper[4834]: I0223 09:10:28.786849 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-multus/cni-sysctl-allowlist-ds-rlmdc"] Feb 23 09:10:29 crc kubenswrapper[4834]: I0223 09:10:29.089923 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4p48q"] Feb 23 09:10:29 crc kubenswrapper[4834]: I0223 09:10:29.622702 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"b7b400ca-5d3b-41a1-bd67-e4f8e7fca799","Type":"ContainerStarted","Data":"1d5618171f8a3ae3fe7e79ff989d84cfe99eaa75464c3c5387f3d4864f5f44c7"} Feb 23 09:10:29 crc kubenswrapper[4834]: I0223 09:10:29.624654 4834 generic.go:334] "Generic (PLEG): container finished" podID="07261104-4428-40bf-b7f6-10319fa3ba42" containerID="d6b856f71c75a9770d9c5434d8106065db73356751b6209ec3341c6ec883f199" exitCode=0 Feb 23 09:10:29 crc kubenswrapper[4834]: I0223 09:10:29.624711 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4mxvd" event={"ID":"07261104-4428-40bf-b7f6-10319fa3ba42","Type":"ContainerDied","Data":"d6b856f71c75a9770d9c5434d8106065db73356751b6209ec3341c6ec883f199"} Feb 23 09:10:29 crc kubenswrapper[4834]: I0223 09:10:29.628591 4834 generic.go:334] "Generic (PLEG): container finished" podID="f20021a4-12a6-49ae-a85f-cc8bdb6051d3" containerID="08b2d4eff732882d0905f6e666eef4e9574c187d7b86c62b5a47c4e21ce6fe14" exitCode=0 Feb 23 09:10:29 crc kubenswrapper[4834]: I0223 09:10:29.628691 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4qbwp" 
event={"ID":"f20021a4-12a6-49ae-a85f-cc8bdb6051d3","Type":"ContainerDied","Data":"08b2d4eff732882d0905f6e666eef4e9574c187d7b86c62b5a47c4e21ce6fe14"} Feb 23 09:10:29 crc kubenswrapper[4834]: I0223 09:10:29.639638 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=3.639618617 podStartE2EDuration="3.639618617s" podCreationTimestamp="2026-02-23 09:10:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:10:29.63797672 +0000 UTC m=+165.716291117" watchObservedRunningTime="2026-02-23 09:10:29.639618617 +0000 UTC m=+165.717933014" Feb 23 09:10:30 crc kubenswrapper[4834]: I0223 09:10:30.591785 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="441917a1-296e-4529-a79f-458faf4769e6" path="/var/lib/kubelet/pods/441917a1-296e-4529-a79f-458faf4769e6/volumes" Feb 23 09:10:30 crc kubenswrapper[4834]: I0223 09:10:30.635984 4834 generic.go:334] "Generic (PLEG): container finished" podID="b7b400ca-5d3b-41a1-bd67-e4f8e7fca799" containerID="1d5618171f8a3ae3fe7e79ff989d84cfe99eaa75464c3c5387f3d4864f5f44c7" exitCode=0 Feb 23 09:10:30 crc kubenswrapper[4834]: I0223 09:10:30.636362 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"b7b400ca-5d3b-41a1-bd67-e4f8e7fca799","Type":"ContainerDied","Data":"1d5618171f8a3ae3fe7e79ff989d84cfe99eaa75464c3c5387f3d4864f5f44c7"} Feb 23 09:10:30 crc kubenswrapper[4834]: I0223 09:10:30.638969 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4mxvd" event={"ID":"07261104-4428-40bf-b7f6-10319fa3ba42","Type":"ContainerStarted","Data":"90b0e4fcda78ef25df168ca0ac260de759b7e730ca43c70daf7138d0156a14eb"} Feb 23 09:10:30 crc kubenswrapper[4834]: I0223 09:10:30.642460 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7vzgv" event={"ID":"d37845a1-60c4-4708-b671-42d20f6a9b34","Type":"ContainerStarted","Data":"5f711532503b6688378876a8e28585b1006463ec4f2ff2e5273714fd09e6577b"} Feb 23 09:10:30 crc kubenswrapper[4834]: I0223 09:10:30.644917 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l7rfh" event={"ID":"2c092b1a-4742-4813-9558-2f5b6ca34024","Type":"ContainerStarted","Data":"534562bcba2c2f2509b4058974601090b5dbee86cf66bb8151ba34633b855784"} Feb 23 09:10:30 crc kubenswrapper[4834]: I0223 09:10:30.650771 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4qbwp" event={"ID":"f20021a4-12a6-49ae-a85f-cc8bdb6051d3","Type":"ContainerStarted","Data":"50796e5f4c364f4e55373310d7951f4dce95a9e88380343969274d1ebcd34f97"} Feb 23 09:10:30 crc kubenswrapper[4834]: I0223 09:10:30.654309 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2qm7f" event={"ID":"f51b8d1c-1783-401f-b9b7-aef3b5bbab05","Type":"ContainerStarted","Data":"a16f0c673108a76b8edbebbcc59f0913b5a8daf257ad0adfb910ad612b17405f"} Feb 23 09:10:30 crc kubenswrapper[4834]: I0223 09:10:30.681633 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2qm7f" podStartSLOduration=3.612364055 podStartE2EDuration="40.681605188s" podCreationTimestamp="2026-02-23 09:09:50 +0000 UTC" firstStartedPulling="2026-02-23 09:09:53.037599309 +0000 UTC m=+129.115913706" 
lastFinishedPulling="2026-02-23 09:10:30.106840452 +0000 UTC m=+166.185154839" observedRunningTime="2026-02-23 09:10:30.678467709 +0000 UTC m=+166.756782096" watchObservedRunningTime="2026-02-23 09:10:30.681605188 +0000 UTC m=+166.759919575" Feb 23 09:10:30 crc kubenswrapper[4834]: I0223 09:10:30.697552 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l7rfh" podStartSLOduration=2.712008103 podStartE2EDuration="38.697532719s" podCreationTimestamp="2026-02-23 09:09:52 +0000 UTC" firstStartedPulling="2026-02-23 09:09:54.143048492 +0000 UTC m=+130.221362879" lastFinishedPulling="2026-02-23 09:10:30.128573108 +0000 UTC m=+166.206887495" observedRunningTime="2026-02-23 09:10:30.696041908 +0000 UTC m=+166.774356305" watchObservedRunningTime="2026-02-23 09:10:30.697532719 +0000 UTC m=+166.775847106" Feb 23 09:10:30 crc kubenswrapper[4834]: I0223 09:10:30.711022 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4qbwp" podStartSLOduration=1.709968426 podStartE2EDuration="37.711001462s" podCreationTimestamp="2026-02-23 09:09:53 +0000 UTC" firstStartedPulling="2026-02-23 09:09:54.157665067 +0000 UTC m=+130.235979454" lastFinishedPulling="2026-02-23 09:10:30.158698103 +0000 UTC m=+166.237012490" observedRunningTime="2026-02-23 09:10:30.709650224 +0000 UTC m=+166.787964611" watchObservedRunningTime="2026-02-23 09:10:30.711001462 +0000 UTC m=+166.789315849" Feb 23 09:10:30 crc kubenswrapper[4834]: I0223 09:10:30.729937 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7vzgv" podStartSLOduration=2.608407127 podStartE2EDuration="38.729916919s" podCreationTimestamp="2026-02-23 09:09:52 +0000 UTC" firstStartedPulling="2026-02-23 09:09:54.169310189 +0000 UTC m=+130.247624576" lastFinishedPulling="2026-02-23 09:10:30.290819981 +0000 UTC m=+166.369134368" observedRunningTime="2026-02-23 09:10:30.727904601 +0000 UTC m=+166.806218998" watchObservedRunningTime="2026-02-23 09:10:30.729916919 +0000 UTC m=+166.808231306" Feb 23 09:10:30 crc kubenswrapper[4834]: I0223 09:10:30.746864 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4mxvd" podStartSLOduration=2.803837346 podStartE2EDuration="37.746812478s" podCreationTimestamp="2026-02-23 09:09:53 +0000 UTC" firstStartedPulling="2026-02-23 09:09:55.236595904 +0000 UTC m=+131.314910291" lastFinishedPulling="2026-02-23 09:10:30.179571036 +0000 UTC m=+166.257885423" observedRunningTime="2026-02-23 09:10:30.743667699 +0000 UTC m=+166.821982096" watchObservedRunningTime="2026-02-23 09:10:30.746812478 +0000 UTC m=+166.825126865" Feb 23 09:10:31 crc kubenswrapper[4834]: I0223 09:10:31.037810 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:10:31 crc kubenswrapper[4834]: I0223 09:10:31.037871 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.046491 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.170278 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-2qm7f" podUID="f51b8d1c-1783-401f-b9b7-aef3b5bbab05" containerName="registry-server" probeResult="failure" output=< Feb 23 09:10:32 crc kubenswrapper[4834]: timeout: failed to connect service ":50051" within 1s Feb 23 09:10:32 crc kubenswrapper[4834]: > Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.200338 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b7b400ca-5d3b-41a1-bd67-e4f8e7fca799-kube-api-access\") pod \"b7b400ca-5d3b-41a1-bd67-e4f8e7fca799\" (UID: \"b7b400ca-5d3b-41a1-bd67-e4f8e7fca799\") " Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.200566 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b7b400ca-5d3b-41a1-bd67-e4f8e7fca799-kubelet-dir\") pod \"b7b400ca-5d3b-41a1-bd67-e4f8e7fca799\" (UID: \"b7b400ca-5d3b-41a1-bd67-e4f8e7fca799\") " Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.201023 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b7b400ca-5d3b-41a1-bd67-e4f8e7fca799-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "b7b400ca-5d3b-41a1-bd67-e4f8e7fca799" (UID: "b7b400ca-5d3b-41a1-bd67-e4f8e7fca799"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.208241 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7b400ca-5d3b-41a1-bd67-e4f8e7fca799-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "b7b400ca-5d3b-41a1-bd67-e4f8e7fca799" (UID: "b7b400ca-5d3b-41a1-bd67-e4f8e7fca799"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.302896 4834 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/b7b400ca-5d3b-41a1-bd67-e4f8e7fca799-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.302934 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b7b400ca-5d3b-41a1-bd67-e4f8e7fca799-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.676312 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.676584 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"b7b400ca-5d3b-41a1-bd67-e4f8e7fca799","Type":"ContainerDied","Data":"fb27044949127394059ea593f514a824e3cde4584c1e8d6d79db17951050c09a"} Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.676620 4834 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fb27044949127394059ea593f514a824e3cde4584c1e8d6d79db17951050c09a" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.755746 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.755834 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.920913 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.920989 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.978075 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.994468 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 23 09:10:32 crc kubenswrapper[4834]: E0223 09:10:32.994736 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="441917a1-296e-4529-a79f-458faf4769e6" containerName="kube-multus-additional-cni-plugins" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.994749 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="441917a1-296e-4529-a79f-458faf4769e6" containerName="kube-multus-additional-cni-plugins" Feb 23 09:10:32 crc kubenswrapper[4834]: E0223 09:10:32.994775 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7b400ca-5d3b-41a1-bd67-e4f8e7fca799" containerName="pruner" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.994783 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7b400ca-5d3b-41a1-bd67-e4f8e7fca799" containerName="pruner" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.994899 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7b400ca-5d3b-41a1-bd67-e4f8e7fca799" containerName="pruner" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.994911 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="441917a1-296e-4529-a79f-458faf4769e6" containerName="kube-multus-additional-cni-plugins" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.995302 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.997779 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Feb 23 09:10:32 crc kubenswrapper[4834]: I0223 09:10:32.998241 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.019743 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.113148 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/71f4a070-36cd-4f2b-8b37-1f93aeaee935-var-lock\") pod \"installer-9-crc\" (UID: \"71f4a070-36cd-4f2b-8b37-1f93aeaee935\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.113323 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/71f4a070-36cd-4f2b-8b37-1f93aeaee935-kubelet-dir\") pod \"installer-9-crc\" (UID: \"71f4a070-36cd-4f2b-8b37-1f93aeaee935\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.113640 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71f4a070-36cd-4f2b-8b37-1f93aeaee935-kube-api-access\") pod \"installer-9-crc\" (UID: \"71f4a070-36cd-4f2b-8b37-1f93aeaee935\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.214757 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/71f4a070-36cd-4f2b-8b37-1f93aeaee935-kubelet-dir\") pod \"installer-9-crc\" (UID: \"71f4a070-36cd-4f2b-8b37-1f93aeaee935\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.214818 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71f4a070-36cd-4f2b-8b37-1f93aeaee935-kube-api-access\") pod \"installer-9-crc\" (UID: \"71f4a070-36cd-4f2b-8b37-1f93aeaee935\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.214863 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/71f4a070-36cd-4f2b-8b37-1f93aeaee935-var-lock\") pod \"installer-9-crc\" (UID: \"71f4a070-36cd-4f2b-8b37-1f93aeaee935\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.214914 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/71f4a070-36cd-4f2b-8b37-1f93aeaee935-kubelet-dir\") pod \"installer-9-crc\" (UID: \"71f4a070-36cd-4f2b-8b37-1f93aeaee935\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.214948 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/71f4a070-36cd-4f2b-8b37-1f93aeaee935-var-lock\") pod \"installer-9-crc\" (UID: 
\"71f4a070-36cd-4f2b-8b37-1f93aeaee935\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.234235 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71f4a070-36cd-4f2b-8b37-1f93aeaee935-kube-api-access\") pod \"installer-9-crc\" (UID: \"71f4a070-36cd-4f2b-8b37-1f93aeaee935\") " pod="openshift-kube-apiserver/installer-9-crc" Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.314975 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.503594 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.503678 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.761986 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Feb 23 09:10:33 crc kubenswrapper[4834]: W0223 09:10:33.770148 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod71f4a070_36cd_4f2b_8b37_1f93aeaee935.slice/crio-7e0d4567441a82cdacd9e014fd74c143b5cf816c6fc97097626a13cbb59fce65 WatchSource:0}: Error finding container 7e0d4567441a82cdacd9e014fd74c143b5cf816c6fc97097626a13cbb59fce65: Status 404 returned error can't find the container with id 7e0d4567441a82cdacd9e014fd74c143b5cf816c6fc97097626a13cbb59fce65 Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.802375 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-7vzgv" podUID="d37845a1-60c4-4708-b671-42d20f6a9b34" containerName="registry-server" probeResult="failure" output=< Feb 23 09:10:33 crc kubenswrapper[4834]: timeout: failed to connect service ":50051" within 1s Feb 23 09:10:33 crc kubenswrapper[4834]: > Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.971628 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:10:33 crc kubenswrapper[4834]: I0223 09:10:33.972001 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:10:34 crc kubenswrapper[4834]: I0223 09:10:34.546418 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4qbwp" podUID="f20021a4-12a6-49ae-a85f-cc8bdb6051d3" containerName="registry-server" probeResult="failure" output=< Feb 23 09:10:34 crc kubenswrapper[4834]: timeout: failed to connect service ":50051" within 1s Feb 23 09:10:34 crc kubenswrapper[4834]: > Feb 23 09:10:34 crc kubenswrapper[4834]: I0223 09:10:34.696953 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"71f4a070-36cd-4f2b-8b37-1f93aeaee935","Type":"ContainerStarted","Data":"51bd37d106375d5cf564364c93d4c90bf980e512a45d0e263861e0740d3ffc92"} Feb 23 09:10:34 crc kubenswrapper[4834]: I0223 09:10:34.697486 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"71f4a070-36cd-4f2b-8b37-1f93aeaee935","Type":"ContainerStarted","Data":"7e0d4567441a82cdacd9e014fd74c143b5cf816c6fc97097626a13cbb59fce65"} Feb 23 09:10:34 crc 
kubenswrapper[4834]: I0223 09:10:34.720066 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=2.720034459 podStartE2EDuration="2.720034459s" podCreationTimestamp="2026-02-23 09:10:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:10:34.716114548 +0000 UTC m=+170.794428935" watchObservedRunningTime="2026-02-23 09:10:34.720034459 +0000 UTC m=+170.798348836" Feb 23 09:10:35 crc kubenswrapper[4834]: I0223 09:10:35.010757 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-4mxvd" podUID="07261104-4428-40bf-b7f6-10319fa3ba42" containerName="registry-server" probeResult="failure" output=< Feb 23 09:10:35 crc kubenswrapper[4834]: timeout: failed to connect service ":50051" within 1s Feb 23 09:10:35 crc kubenswrapper[4834]: > Feb 23 09:10:38 crc kubenswrapper[4834]: I0223 09:10:38.728734 4834 generic.go:334] "Generic (PLEG): container finished" podID="19479380-b603-400a-99e9-6b8186f42f33" containerID="2ad9d1b521e9d0630b7d26c4f5fc5d6ecd9dd7e1bee7b4659a9998fd998a85dc" exitCode=0 Feb 23 09:10:38 crc kubenswrapper[4834]: I0223 09:10:38.728767 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5vvxt" event={"ID":"19479380-b603-400a-99e9-6b8186f42f33","Type":"ContainerDied","Data":"2ad9d1b521e9d0630b7d26c4f5fc5d6ecd9dd7e1bee7b4659a9998fd998a85dc"} Feb 23 09:10:40 crc kubenswrapper[4834]: I0223 09:10:40.740927 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kk9xp" event={"ID":"4ad50479-c17b-4e80-b57a-ef039e81c612","Type":"ContainerStarted","Data":"dd25652b2b1faad4278de96498f3c88e8f59fcc3bb1fe19f61ea9eb7ae95e849"} Feb 23 09:10:40 crc kubenswrapper[4834]: I0223 09:10:40.742452 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jw46p" event={"ID":"67be3aab-67ec-42d2-9158-efe9b6ee13e7","Type":"ContainerStarted","Data":"46f23e24d9f3c230f7207f1bd6dc737b09dc24e38a02745d2a201235e0ed3055"} Feb 23 09:10:40 crc kubenswrapper[4834]: I0223 09:10:40.745463 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5vvxt" event={"ID":"19479380-b603-400a-99e9-6b8186f42f33","Type":"ContainerStarted","Data":"97cce5f8185524e49f6123b364bfaad63410b3b78bda2d948a50e9fbcf2cfa0a"} Feb 23 09:10:40 crc kubenswrapper[4834]: I0223 09:10:40.759418 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:10:40 crc kubenswrapper[4834]: I0223 09:10:40.759468 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:10:40 crc kubenswrapper[4834]: I0223 09:10:40.802729 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5vvxt" podStartSLOduration=2.8062510830000003 podStartE2EDuration="50.802710387s" podCreationTimestamp="2026-02-23 09:09:50 +0000 UTC" firstStartedPulling="2026-02-23 09:09:52.012688828 +0000 UTC m=+128.091003215" lastFinishedPulling="2026-02-23 09:10:40.009148132 +0000 UTC m=+176.087462519" observedRunningTime="2026-02-23 09:10:40.801103761 +0000 UTC m=+176.879418148" watchObservedRunningTime="2026-02-23 09:10:40.802710387 +0000 UTC m=+176.881024774" Feb 23 09:10:41 crc 
kubenswrapper[4834]: I0223 09:10:41.081432 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:10:41 crc kubenswrapper[4834]: I0223 09:10:41.124011 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:10:41 crc kubenswrapper[4834]: I0223 09:10:41.753418 4834 generic.go:334] "Generic (PLEG): container finished" podID="4ad50479-c17b-4e80-b57a-ef039e81c612" containerID="dd25652b2b1faad4278de96498f3c88e8f59fcc3bb1fe19f61ea9eb7ae95e849" exitCode=0 Feb 23 09:10:41 crc kubenswrapper[4834]: I0223 09:10:41.753445 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kk9xp" event={"ID":"4ad50479-c17b-4e80-b57a-ef039e81c612","Type":"ContainerDied","Data":"dd25652b2b1faad4278de96498f3c88e8f59fcc3bb1fe19f61ea9eb7ae95e849"} Feb 23 09:10:41 crc kubenswrapper[4834]: I0223 09:10:41.757444 4834 generic.go:334] "Generic (PLEG): container finished" podID="67be3aab-67ec-42d2-9158-efe9b6ee13e7" containerID="46f23e24d9f3c230f7207f1bd6dc737b09dc24e38a02745d2a201235e0ed3055" exitCode=0 Feb 23 09:10:41 crc kubenswrapper[4834]: I0223 09:10:41.757606 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jw46p" event={"ID":"67be3aab-67ec-42d2-9158-efe9b6ee13e7","Type":"ContainerDied","Data":"46f23e24d9f3c230f7207f1bd6dc737b09dc24e38a02745d2a201235e0ed3055"} Feb 23 09:10:41 crc kubenswrapper[4834]: I0223 09:10:41.798246 4834 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-5vvxt" podUID="19479380-b603-400a-99e9-6b8186f42f33" containerName="registry-server" probeResult="failure" output=< Feb 23 09:10:41 crc kubenswrapper[4834]: timeout: failed to connect service ":50051" within 1s Feb 23 09:10:41 crc kubenswrapper[4834]: > Feb 23 09:10:42 crc kubenswrapper[4834]: I0223 09:10:42.765414 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jw46p" event={"ID":"67be3aab-67ec-42d2-9158-efe9b6ee13e7","Type":"ContainerStarted","Data":"8d934ecdd3c06588a8ae1dec0591791bf95a45f065d23ffbf851d5ca5ad727e6"} Feb 23 09:10:42 crc kubenswrapper[4834]: I0223 09:10:42.767990 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kk9xp" event={"ID":"4ad50479-c17b-4e80-b57a-ef039e81c612","Type":"ContainerStarted","Data":"0fc60fa5822d1ceb486d7e52db5a68b0856fbe5e53317711fb009123f2ed4b6e"} Feb 23 09:10:42 crc kubenswrapper[4834]: I0223 09:10:42.785983 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jw46p" podStartSLOduration=4.688890127 podStartE2EDuration="53.785959932s" podCreationTimestamp="2026-02-23 09:09:49 +0000 UTC" firstStartedPulling="2026-02-23 09:09:53.067351994 +0000 UTC m=+129.145666381" lastFinishedPulling="2026-02-23 09:10:42.164421799 +0000 UTC m=+178.242736186" observedRunningTime="2026-02-23 09:10:42.783833091 +0000 UTC m=+178.862147478" watchObservedRunningTime="2026-02-23 09:10:42.785959932 +0000 UTC m=+178.864274319" Feb 23 09:10:42 crc kubenswrapper[4834]: I0223 09:10:42.795294 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:10:42 crc kubenswrapper[4834]: I0223 09:10:42.810516 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/community-operators-kk9xp" podStartSLOduration=3.646564989 podStartE2EDuration="52.810490117s" podCreationTimestamp="2026-02-23 09:09:50 +0000 UTC" firstStartedPulling="2026-02-23 09:09:53.033430184 +0000 UTC m=+129.111744571" lastFinishedPulling="2026-02-23 09:10:42.197355322 +0000 UTC m=+178.275669699" observedRunningTime="2026-02-23 09:10:42.807847582 +0000 UTC m=+178.886161989" watchObservedRunningTime="2026-02-23 09:10:42.810490117 +0000 UTC m=+178.888804514" Feb 23 09:10:42 crc kubenswrapper[4834]: I0223 09:10:42.838132 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:10:42 crc kubenswrapper[4834]: I0223 09:10:42.965592 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.207961 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2qm7f"] Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.208701 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2qm7f" podUID="f51b8d1c-1783-401f-b9b7-aef3b5bbab05" containerName="registry-server" containerID="cri-o://a16f0c673108a76b8edbebbcc59f0913b5a8daf257ad0adfb910ad612b17405f" gracePeriod=2 Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.556460 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.609405 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.650826 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.771569 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-catalog-content\") pod \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\" (UID: \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\") " Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.771667 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zfb2q\" (UniqueName: \"kubernetes.io/projected/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-kube-api-access-zfb2q\") pod \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\" (UID: \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\") " Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.771735 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-utilities\") pod \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\" (UID: \"f51b8d1c-1783-401f-b9b7-aef3b5bbab05\") " Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.772517 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-utilities" (OuterVolumeSpecName: "utilities") pod "f51b8d1c-1783-401f-b9b7-aef3b5bbab05" (UID: "f51b8d1c-1783-401f-b9b7-aef3b5bbab05"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.774503 4834 generic.go:334] "Generic (PLEG): container finished" podID="f51b8d1c-1783-401f-b9b7-aef3b5bbab05" containerID="a16f0c673108a76b8edbebbcc59f0913b5a8daf257ad0adfb910ad612b17405f" exitCode=0 Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.774579 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2qm7f" event={"ID":"f51b8d1c-1783-401f-b9b7-aef3b5bbab05","Type":"ContainerDied","Data":"a16f0c673108a76b8edbebbcc59f0913b5a8daf257ad0adfb910ad612b17405f"} Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.774623 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2qm7f" event={"ID":"f51b8d1c-1783-401f-b9b7-aef3b5bbab05","Type":"ContainerDied","Data":"14579291ace62cc943e0018e2e687fc3d35611b23b0204154c303be72fde7126"} Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.774643 4834 scope.go:117] "RemoveContainer" containerID="a16f0c673108a76b8edbebbcc59f0913b5a8daf257ad0adfb910ad612b17405f" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.774655 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2qm7f" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.778519 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-kube-api-access-zfb2q" (OuterVolumeSpecName: "kube-api-access-zfb2q") pod "f51b8d1c-1783-401f-b9b7-aef3b5bbab05" (UID: "f51b8d1c-1783-401f-b9b7-aef3b5bbab05"). InnerVolumeSpecName "kube-api-access-zfb2q". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.807292 4834 scope.go:117] "RemoveContainer" containerID="233fefb2254ec75643ea01951263eb1e2e6aa53cb26e65fcb3acbd16ce251ea9" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.824785 4834 scope.go:117] "RemoveContainer" containerID="73b485a117810e038a9da11125cfe3aa8869a0c752f78d7b7fe919f4966f236f" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.839149 4834 scope.go:117] "RemoveContainer" containerID="a16f0c673108a76b8edbebbcc59f0913b5a8daf257ad0adfb910ad612b17405f" Feb 23 09:10:43 crc kubenswrapper[4834]: E0223 09:10:43.839626 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a16f0c673108a76b8edbebbcc59f0913b5a8daf257ad0adfb910ad612b17405f\": container with ID starting with a16f0c673108a76b8edbebbcc59f0913b5a8daf257ad0adfb910ad612b17405f not found: ID does not exist" containerID="a16f0c673108a76b8edbebbcc59f0913b5a8daf257ad0adfb910ad612b17405f" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.839661 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a16f0c673108a76b8edbebbcc59f0913b5a8daf257ad0adfb910ad612b17405f"} err="failed to get container status \"a16f0c673108a76b8edbebbcc59f0913b5a8daf257ad0adfb910ad612b17405f\": rpc error: code = NotFound desc = could not find container \"a16f0c673108a76b8edbebbcc59f0913b5a8daf257ad0adfb910ad612b17405f\": container with ID starting with a16f0c673108a76b8edbebbcc59f0913b5a8daf257ad0adfb910ad612b17405f not found: ID does not exist" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.839691 4834 scope.go:117] "RemoveContainer" 
containerID="233fefb2254ec75643ea01951263eb1e2e6aa53cb26e65fcb3acbd16ce251ea9" Feb 23 09:10:43 crc kubenswrapper[4834]: E0223 09:10:43.840069 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"233fefb2254ec75643ea01951263eb1e2e6aa53cb26e65fcb3acbd16ce251ea9\": container with ID starting with 233fefb2254ec75643ea01951263eb1e2e6aa53cb26e65fcb3acbd16ce251ea9 not found: ID does not exist" containerID="233fefb2254ec75643ea01951263eb1e2e6aa53cb26e65fcb3acbd16ce251ea9" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.840092 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"233fefb2254ec75643ea01951263eb1e2e6aa53cb26e65fcb3acbd16ce251ea9"} err="failed to get container status \"233fefb2254ec75643ea01951263eb1e2e6aa53cb26e65fcb3acbd16ce251ea9\": rpc error: code = NotFound desc = could not find container \"233fefb2254ec75643ea01951263eb1e2e6aa53cb26e65fcb3acbd16ce251ea9\": container with ID starting with 233fefb2254ec75643ea01951263eb1e2e6aa53cb26e65fcb3acbd16ce251ea9 not found: ID does not exist" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.840106 4834 scope.go:117] "RemoveContainer" containerID="73b485a117810e038a9da11125cfe3aa8869a0c752f78d7b7fe919f4966f236f" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.840138 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f51b8d1c-1783-401f-b9b7-aef3b5bbab05" (UID: "f51b8d1c-1783-401f-b9b7-aef3b5bbab05"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:10:43 crc kubenswrapper[4834]: E0223 09:10:43.840641 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73b485a117810e038a9da11125cfe3aa8869a0c752f78d7b7fe919f4966f236f\": container with ID starting with 73b485a117810e038a9da11125cfe3aa8869a0c752f78d7b7fe919f4966f236f not found: ID does not exist" containerID="73b485a117810e038a9da11125cfe3aa8869a0c752f78d7b7fe919f4966f236f" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.840696 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73b485a117810e038a9da11125cfe3aa8869a0c752f78d7b7fe919f4966f236f"} err="failed to get container status \"73b485a117810e038a9da11125cfe3aa8869a0c752f78d7b7fe919f4966f236f\": rpc error: code = NotFound desc = could not find container \"73b485a117810e038a9da11125cfe3aa8869a0c752f78d7b7fe919f4966f236f\": container with ID starting with 73b485a117810e038a9da11125cfe3aa8869a0c752f78d7b7fe919f4966f236f not found: ID does not exist" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.873756 4834 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.873796 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zfb2q\" (UniqueName: \"kubernetes.io/projected/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-kube-api-access-zfb2q\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:43 crc kubenswrapper[4834]: I0223 09:10:43.873810 4834 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/f51b8d1c-1783-401f-b9b7-aef3b5bbab05-utilities\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:44 crc kubenswrapper[4834]: I0223 09:10:44.015248 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:10:44 crc kubenswrapper[4834]: I0223 09:10:44.062060 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:10:44 crc kubenswrapper[4834]: I0223 09:10:44.103928 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2qm7f"] Feb 23 09:10:44 crc kubenswrapper[4834]: I0223 09:10:44.107481 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2qm7f"] Feb 23 09:10:44 crc kubenswrapper[4834]: I0223 09:10:44.592841 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f51b8d1c-1783-401f-b9b7-aef3b5bbab05" path="/var/lib/kubelet/pods/f51b8d1c-1783-401f-b9b7-aef3b5bbab05/volumes" Feb 23 09:10:44 crc kubenswrapper[4834]: I0223 09:10:44.939442 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-c4657466b-wmdpr"] Feb 23 09:10:44 crc kubenswrapper[4834]: I0223 09:10:44.939712 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" podUID="eb5fc6af-049b-4814-94f0-4a414d59a6ab" containerName="controller-manager" containerID="cri-o://1ba8c1f689c95ea9d67f4102e113d62896b84314d491998bc9dcf0c251a0277a" gracePeriod=30 Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.044953 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn"] Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.045389 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" podUID="b6a3a496-6eb6-4528-9fc1-3f6565cc0a82" containerName="route-controller-manager" containerID="cri-o://ebfc8e187bca5f04d76de2f6e83a3b597e682288b2fbd0ded152f1bc7c47943b" gracePeriod=30 Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.446063 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.449296 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.598198 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-client-ca\") pod \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.598267 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5sqx\" (UniqueName: \"kubernetes.io/projected/eb5fc6af-049b-4814-94f0-4a414d59a6ab-kube-api-access-q5sqx\") pod \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.598299 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-proxy-ca-bundles\") pod \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.598338 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-serving-cert\") pod \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.598367 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-config\") pod \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.598385 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb5fc6af-049b-4814-94f0-4a414d59a6ab-serving-cert\") pod \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\" (UID: \"eb5fc6af-049b-4814-94f0-4a414d59a6ab\") " Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.598453 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xdh7\" (UniqueName: \"kubernetes.io/projected/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-kube-api-access-8xdh7\") pod \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.598472 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-client-ca\") pod \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.598494 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-config\") pod \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\" (UID: \"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82\") " Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.599229 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-config" (OuterVolumeSpecName: "config") pod "b6a3a496-6eb6-4528-9fc1-3f6565cc0a82" (UID: 
"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.599489 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-client-ca" (OuterVolumeSpecName: "client-ca") pod "eb5fc6af-049b-4814-94f0-4a414d59a6ab" (UID: "eb5fc6af-049b-4814-94f0-4a414d59a6ab"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.605177 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-config" (OuterVolumeSpecName: "config") pod "eb5fc6af-049b-4814-94f0-4a414d59a6ab" (UID: "eb5fc6af-049b-4814-94f0-4a414d59a6ab"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.606766 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "eb5fc6af-049b-4814-94f0-4a414d59a6ab" (UID: "eb5fc6af-049b-4814-94f0-4a414d59a6ab"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.607605 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-client-ca" (OuterVolumeSpecName: "client-ca") pod "b6a3a496-6eb6-4528-9fc1-3f6565cc0a82" (UID: "b6a3a496-6eb6-4528-9fc1-3f6565cc0a82"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.608747 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "b6a3a496-6eb6-4528-9fc1-3f6565cc0a82" (UID: "b6a3a496-6eb6-4528-9fc1-3f6565cc0a82"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.609732 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l7rfh"] Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.610052 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l7rfh" podUID="2c092b1a-4742-4813-9558-2f5b6ca34024" containerName="registry-server" containerID="cri-o://534562bcba2c2f2509b4058974601090b5dbee86cf66bb8151ba34633b855784" gracePeriod=2 Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.615413 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eb5fc6af-049b-4814-94f0-4a414d59a6ab-kube-api-access-q5sqx" (OuterVolumeSpecName: "kube-api-access-q5sqx") pod "eb5fc6af-049b-4814-94f0-4a414d59a6ab" (UID: "eb5fc6af-049b-4814-94f0-4a414d59a6ab"). InnerVolumeSpecName "kube-api-access-q5sqx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.615557 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eb5fc6af-049b-4814-94f0-4a414d59a6ab-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "eb5fc6af-049b-4814-94f0-4a414d59a6ab" (UID: "eb5fc6af-049b-4814-94f0-4a414d59a6ab"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.616981 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-kube-api-access-8xdh7" (OuterVolumeSpecName: "kube-api-access-8xdh7") pod "b6a3a496-6eb6-4528-9fc1-3f6565cc0a82" (UID: "b6a3a496-6eb6-4528-9fc1-3f6565cc0a82"). InnerVolumeSpecName "kube-api-access-8xdh7". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.700117 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xdh7\" (UniqueName: \"kubernetes.io/projected/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-kube-api-access-8xdh7\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.700160 4834 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-client-ca\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.700172 4834 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-config\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.700183 4834 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-client-ca\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.700191 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5sqx\" (UniqueName: \"kubernetes.io/projected/eb5fc6af-049b-4814-94f0-4a414d59a6ab-kube-api-access-q5sqx\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.700199 4834 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.700207 4834 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.700217 4834 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb5fc6af-049b-4814-94f0-4a414d59a6ab-config\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.700224 4834 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb5fc6af-049b-4814-94f0-4a414d59a6ab-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.788649 4834 generic.go:334] "Generic (PLEG): container finished" podID="eb5fc6af-049b-4814-94f0-4a414d59a6ab" 
containerID="1ba8c1f689c95ea9d67f4102e113d62896b84314d491998bc9dcf0c251a0277a" exitCode=0 Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.788698 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.788706 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" event={"ID":"eb5fc6af-049b-4814-94f0-4a414d59a6ab","Type":"ContainerDied","Data":"1ba8c1f689c95ea9d67f4102e113d62896b84314d491998bc9dcf0c251a0277a"} Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.788764 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-c4657466b-wmdpr" event={"ID":"eb5fc6af-049b-4814-94f0-4a414d59a6ab","Type":"ContainerDied","Data":"dc723cf1b0c8b6dc29f266a83a7e972cbf88685c5826ed659c1eeb690edfbe79"} Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.788788 4834 scope.go:117] "RemoveContainer" containerID="1ba8c1f689c95ea9d67f4102e113d62896b84314d491998bc9dcf0c251a0277a" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.790958 4834 generic.go:334] "Generic (PLEG): container finished" podID="b6a3a496-6eb6-4528-9fc1-3f6565cc0a82" containerID="ebfc8e187bca5f04d76de2f6e83a3b597e682288b2fbd0ded152f1bc7c47943b" exitCode=0 Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.791013 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.791057 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" event={"ID":"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82","Type":"ContainerDied","Data":"ebfc8e187bca5f04d76de2f6e83a3b597e682288b2fbd0ded152f1bc7c47943b"} Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.791081 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn" event={"ID":"b6a3a496-6eb6-4528-9fc1-3f6565cc0a82","Type":"ContainerDied","Data":"e8e60d84d15a5155c3776f61cdbe18143dd9f0588773927af5c5f9e7551a36cf"} Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.793657 4834 generic.go:334] "Generic (PLEG): container finished" podID="2c092b1a-4742-4813-9558-2f5b6ca34024" containerID="534562bcba2c2f2509b4058974601090b5dbee86cf66bb8151ba34633b855784" exitCode=0 Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.793709 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l7rfh" event={"ID":"2c092b1a-4742-4813-9558-2f5b6ca34024","Type":"ContainerDied","Data":"534562bcba2c2f2509b4058974601090b5dbee86cf66bb8151ba34633b855784"} Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.808308 4834 scope.go:117] "RemoveContainer" containerID="1ba8c1f689c95ea9d67f4102e113d62896b84314d491998bc9dcf0c251a0277a" Feb 23 09:10:45 crc kubenswrapper[4834]: E0223 09:10:45.808767 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1ba8c1f689c95ea9d67f4102e113d62896b84314d491998bc9dcf0c251a0277a\": container with ID starting with 1ba8c1f689c95ea9d67f4102e113d62896b84314d491998bc9dcf0c251a0277a not found: ID does not exist" 
containerID="1ba8c1f689c95ea9d67f4102e113d62896b84314d491998bc9dcf0c251a0277a" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.808809 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1ba8c1f689c95ea9d67f4102e113d62896b84314d491998bc9dcf0c251a0277a"} err="failed to get container status \"1ba8c1f689c95ea9d67f4102e113d62896b84314d491998bc9dcf0c251a0277a\": rpc error: code = NotFound desc = could not find container \"1ba8c1f689c95ea9d67f4102e113d62896b84314d491998bc9dcf0c251a0277a\": container with ID starting with 1ba8c1f689c95ea9d67f4102e113d62896b84314d491998bc9dcf0c251a0277a not found: ID does not exist" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.808838 4834 scope.go:117] "RemoveContainer" containerID="ebfc8e187bca5f04d76de2f6e83a3b597e682288b2fbd0ded152f1bc7c47943b" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.817259 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-c4657466b-wmdpr"] Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.820271 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-c4657466b-wmdpr"] Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.832493 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn"] Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.835269 4834 scope.go:117] "RemoveContainer" containerID="ebfc8e187bca5f04d76de2f6e83a3b597e682288b2fbd0ded152f1bc7c47943b" Feb 23 09:10:45 crc kubenswrapper[4834]: E0223 09:10:45.835802 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ebfc8e187bca5f04d76de2f6e83a3b597e682288b2fbd0ded152f1bc7c47943b\": container with ID starting with ebfc8e187bca5f04d76de2f6e83a3b597e682288b2fbd0ded152f1bc7c47943b not found: ID does not exist" containerID="ebfc8e187bca5f04d76de2f6e83a3b597e682288b2fbd0ded152f1bc7c47943b" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.835864 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ebfc8e187bca5f04d76de2f6e83a3b597e682288b2fbd0ded152f1bc7c47943b"} err="failed to get container status \"ebfc8e187bca5f04d76de2f6e83a3b597e682288b2fbd0ded152f1bc7c47943b\": rpc error: code = NotFound desc = could not find container \"ebfc8e187bca5f04d76de2f6e83a3b597e682288b2fbd0ded152f1bc7c47943b\": container with ID starting with ebfc8e187bca5f04d76de2f6e83a3b597e682288b2fbd0ded152f1bc7c47943b not found: ID does not exist" Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.836217 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-bc8b6d86d-zcksn"] Feb 23 09:10:45 crc kubenswrapper[4834]: I0223 09:10:45.936889 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.022438 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-8657d779d8-j47tb"] Feb 23 09:10:46 crc kubenswrapper[4834]: E0223 09:10:46.023537 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eb5fc6af-049b-4814-94f0-4a414d59a6ab" containerName="controller-manager" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.023626 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="eb5fc6af-049b-4814-94f0-4a414d59a6ab" containerName="controller-manager" Feb 23 09:10:46 crc kubenswrapper[4834]: E0223 09:10:46.023692 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6a3a496-6eb6-4528-9fc1-3f6565cc0a82" containerName="route-controller-manager" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.023747 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6a3a496-6eb6-4528-9fc1-3f6565cc0a82" containerName="route-controller-manager" Feb 23 09:10:46 crc kubenswrapper[4834]: E0223 09:10:46.023825 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c092b1a-4742-4813-9558-2f5b6ca34024" containerName="extract-utilities" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.023890 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c092b1a-4742-4813-9558-2f5b6ca34024" containerName="extract-utilities" Feb 23 09:10:46 crc kubenswrapper[4834]: E0223 09:10:46.023951 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f51b8d1c-1783-401f-b9b7-aef3b5bbab05" containerName="extract-content" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.024020 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f51b8d1c-1783-401f-b9b7-aef3b5bbab05" containerName="extract-content" Feb 23 09:10:46 crc kubenswrapper[4834]: E0223 09:10:46.024094 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f51b8d1c-1783-401f-b9b7-aef3b5bbab05" containerName="extract-utilities" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.024148 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f51b8d1c-1783-401f-b9b7-aef3b5bbab05" containerName="extract-utilities" Feb 23 09:10:46 crc kubenswrapper[4834]: E0223 09:10:46.024203 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c092b1a-4742-4813-9558-2f5b6ca34024" containerName="registry-server" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.024252 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c092b1a-4742-4813-9558-2f5b6ca34024" containerName="registry-server" Feb 23 09:10:46 crc kubenswrapper[4834]: E0223 09:10:46.024315 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c092b1a-4742-4813-9558-2f5b6ca34024" containerName="extract-content" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.024367 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c092b1a-4742-4813-9558-2f5b6ca34024" containerName="extract-content" Feb 23 09:10:46 crc kubenswrapper[4834]: E0223 09:10:46.024457 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f51b8d1c-1783-401f-b9b7-aef3b5bbab05" containerName="registry-server" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.024513 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f51b8d1c-1783-401f-b9b7-aef3b5bbab05" containerName="registry-server" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.025325 4834 
memory_manager.go:354] "RemoveStaleState removing state" podUID="2c092b1a-4742-4813-9558-2f5b6ca34024" containerName="registry-server" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.025412 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="eb5fc6af-049b-4814-94f0-4a414d59a6ab" containerName="controller-manager" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.025438 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="f51b8d1c-1783-401f-b9b7-aef3b5bbab05" containerName="registry-server" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.025461 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6a3a496-6eb6-4528-9fc1-3f6565cc0a82" containerName="route-controller-manager" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.026422 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.030280 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.030782 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.031176 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.031979 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.040217 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.041541 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.045426 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.051488 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8657d779d8-j47tb"] Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.104277 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c092b1a-4742-4813-9558-2f5b6ca34024-utilities\") pod \"2c092b1a-4742-4813-9558-2f5b6ca34024\" (UID: \"2c092b1a-4742-4813-9558-2f5b6ca34024\") " Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.104893 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sscdv\" (UniqueName: \"kubernetes.io/projected/2c092b1a-4742-4813-9558-2f5b6ca34024-kube-api-access-sscdv\") pod \"2c092b1a-4742-4813-9558-2f5b6ca34024\" (UID: \"2c092b1a-4742-4813-9558-2f5b6ca34024\") " Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.105043 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c092b1a-4742-4813-9558-2f5b6ca34024-catalog-content\") pod \"2c092b1a-4742-4813-9558-2f5b6ca34024\" (UID: \"2c092b1a-4742-4813-9558-2f5b6ca34024\") " Feb 23 09:10:46 crc 
kubenswrapper[4834]: I0223 09:10:46.105052 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c092b1a-4742-4813-9558-2f5b6ca34024-utilities" (OuterVolumeSpecName: "utilities") pod "2c092b1a-4742-4813-9558-2f5b6ca34024" (UID: "2c092b1a-4742-4813-9558-2f5b6ca34024"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.105472 4834 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2c092b1a-4742-4813-9558-2f5b6ca34024-utilities\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.110197 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c092b1a-4742-4813-9558-2f5b6ca34024-kube-api-access-sscdv" (OuterVolumeSpecName: "kube-api-access-sscdv") pod "2c092b1a-4742-4813-9558-2f5b6ca34024" (UID: "2c092b1a-4742-4813-9558-2f5b6ca34024"). InnerVolumeSpecName "kube-api-access-sscdv". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.130221 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2c092b1a-4742-4813-9558-2f5b6ca34024-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2c092b1a-4742-4813-9558-2f5b6ca34024" (UID: "2c092b1a-4742-4813-9558-2f5b6ca34024"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.206486 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c803df1e-f605-4117-bf40-dabc658a6821-serving-cert\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.206551 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-proxy-ca-bundles\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.206577 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-client-ca\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.206597 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-config\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.206617 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8nck\" (UniqueName: 
\"kubernetes.io/projected/c803df1e-f605-4117-bf40-dabc658a6821-kube-api-access-n8nck\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.206658 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sscdv\" (UniqueName: \"kubernetes.io/projected/2c092b1a-4742-4813-9558-2f5b6ca34024-kube-api-access-sscdv\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.206669 4834 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2c092b1a-4742-4813-9558-2f5b6ca34024-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.308188 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-proxy-ca-bundles\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.308333 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-client-ca\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.308452 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-config\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.309106 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8nck\" (UniqueName: \"kubernetes.io/projected/c803df1e-f605-4117-bf40-dabc658a6821-kube-api-access-n8nck\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.309266 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c803df1e-f605-4117-bf40-dabc658a6821-serving-cert\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.309834 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-client-ca\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.309979 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-proxy-ca-bundles\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.310954 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-config\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.422628 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c803df1e-f605-4117-bf40-dabc658a6821-serving-cert\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.422935 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8nck\" (UniqueName: \"kubernetes.io/projected/c803df1e-f605-4117-bf40-dabc658a6821-kube-api-access-n8nck\") pod \"controller-manager-8657d779d8-j47tb\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.594684 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6a3a496-6eb6-4528-9fc1-3f6565cc0a82" path="/var/lib/kubelet/pods/b6a3a496-6eb6-4528-9fc1-3f6565cc0a82/volumes" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.595574 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eb5fc6af-049b-4814-94f0-4a414d59a6ab" path="/var/lib/kubelet/pods/eb5fc6af-049b-4814-94f0-4a414d59a6ab/volumes" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.650220 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.817789 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l7rfh" event={"ID":"2c092b1a-4742-4813-9558-2f5b6ca34024","Type":"ContainerDied","Data":"6fd5c1571e0efbb9c7b8c3ca47e37a6b3f923dee98ca02f27df51e12a1eed6e0"} Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.817825 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l7rfh" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.818249 4834 scope.go:117] "RemoveContainer" containerID="534562bcba2c2f2509b4058974601090b5dbee86cf66bb8151ba34633b855784" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.836847 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l7rfh"] Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.840041 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l7rfh"] Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.855067 4834 scope.go:117] "RemoveContainer" containerID="a305c850830ffcb63fa2c6d557c480fdca57de0a1559f8dc6331ad8d4380237f" Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.866347 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-8657d779d8-j47tb"] Feb 23 09:10:46 crc kubenswrapper[4834]: I0223 09:10:46.869944 4834 scope.go:117] "RemoveContainer" containerID="5e2d0c921537e32c58f758bbdab57848d1b66f6ef66c7cc17c843299f274bff3" Feb 23 09:10:46 crc kubenswrapper[4834]: W0223 09:10:46.883107 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc803df1e_f605_4117_bf40_dabc658a6821.slice/crio-d2f6a119fc17a1f326b87c589062185b49c0023508a911e2e7f4291a4f0aa294 WatchSource:0}: Error finding container d2f6a119fc17a1f326b87c589062185b49c0023508a911e2e7f4291a4f0aa294: Status 404 returned error can't find the container with id d2f6a119fc17a1f326b87c589062185b49c0023508a911e2e7f4291a4f0aa294 Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.025360 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh"] Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.026676 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.028287 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.028465 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.028643 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.029756 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.030294 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.031093 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.047245 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh"] Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.122553 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8be5b6f3-e560-446d-afb1-2160a964a09e-config\") pod \"route-controller-manager-5775d66fc8-btxjh\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.122663 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8be5b6f3-e560-446d-afb1-2160a964a09e-client-ca\") pod \"route-controller-manager-5775d66fc8-btxjh\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.122695 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7b5ps\" (UniqueName: \"kubernetes.io/projected/8be5b6f3-e560-446d-afb1-2160a964a09e-kube-api-access-7b5ps\") pod \"route-controller-manager-5775d66fc8-btxjh\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.122739 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8be5b6f3-e560-446d-afb1-2160a964a09e-serving-cert\") pod \"route-controller-manager-5775d66fc8-btxjh\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.224022 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8be5b6f3-e560-446d-afb1-2160a964a09e-client-ca\") pod 
\"route-controller-manager-5775d66fc8-btxjh\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.224065 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7b5ps\" (UniqueName: \"kubernetes.io/projected/8be5b6f3-e560-446d-afb1-2160a964a09e-kube-api-access-7b5ps\") pod \"route-controller-manager-5775d66fc8-btxjh\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.224102 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8be5b6f3-e560-446d-afb1-2160a964a09e-serving-cert\") pod \"route-controller-manager-5775d66fc8-btxjh\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.224170 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8be5b6f3-e560-446d-afb1-2160a964a09e-config\") pod \"route-controller-manager-5775d66fc8-btxjh\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.225539 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8be5b6f3-e560-446d-afb1-2160a964a09e-config\") pod \"route-controller-manager-5775d66fc8-btxjh\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.226236 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8be5b6f3-e560-446d-afb1-2160a964a09e-client-ca\") pod \"route-controller-manager-5775d66fc8-btxjh\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.233975 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8be5b6f3-e560-446d-afb1-2160a964a09e-serving-cert\") pod \"route-controller-manager-5775d66fc8-btxjh\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.248386 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7b5ps\" (UniqueName: \"kubernetes.io/projected/8be5b6f3-e560-446d-afb1-2160a964a09e-kube-api-access-7b5ps\") pod \"route-controller-manager-5775d66fc8-btxjh\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.352707 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.614368 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh"] Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.828911 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" event={"ID":"c803df1e-f605-4117-bf40-dabc658a6821","Type":"ContainerStarted","Data":"d23cf760a09a8974fb5b8e7a3ce3829778982fc2831bdddf8641d1208672f962"} Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.829433 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.829450 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" event={"ID":"c803df1e-f605-4117-bf40-dabc658a6821","Type":"ContainerStarted","Data":"d2f6a119fc17a1f326b87c589062185b49c0023508a911e2e7f4291a4f0aa294"} Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.830574 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" event={"ID":"8be5b6f3-e560-446d-afb1-2160a964a09e","Type":"ContainerStarted","Data":"462fa6c40747949678b926be50c9abc27404cfddc5fdbe6787eb182d3921aebf"} Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.837012 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:10:47 crc kubenswrapper[4834]: I0223 09:10:47.847837 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" podStartSLOduration=3.847811417 podStartE2EDuration="3.847811417s" podCreationTimestamp="2026-02-23 09:10:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:10:47.846180142 +0000 UTC m=+183.924494529" watchObservedRunningTime="2026-02-23 09:10:47.847811417 +0000 UTC m=+183.926125804" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.005909 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4mxvd"] Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.006243 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4mxvd" podUID="07261104-4428-40bf-b7f6-10319fa3ba42" containerName="registry-server" containerID="cri-o://90b0e4fcda78ef25df168ca0ac260de759b7e730ca43c70daf7138d0156a14eb" gracePeriod=2 Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.352944 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.541510 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07261104-4428-40bf-b7f6-10319fa3ba42-catalog-content\") pod \"07261104-4428-40bf-b7f6-10319fa3ba42\" (UID: \"07261104-4428-40bf-b7f6-10319fa3ba42\") " Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.541587 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07261104-4428-40bf-b7f6-10319fa3ba42-utilities\") pod \"07261104-4428-40bf-b7f6-10319fa3ba42\" (UID: \"07261104-4428-40bf-b7f6-10319fa3ba42\") " Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.541623 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wzx7h\" (UniqueName: \"kubernetes.io/projected/07261104-4428-40bf-b7f6-10319fa3ba42-kube-api-access-wzx7h\") pod \"07261104-4428-40bf-b7f6-10319fa3ba42\" (UID: \"07261104-4428-40bf-b7f6-10319fa3ba42\") " Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.542919 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07261104-4428-40bf-b7f6-10319fa3ba42-utilities" (OuterVolumeSpecName: "utilities") pod "07261104-4428-40bf-b7f6-10319fa3ba42" (UID: "07261104-4428-40bf-b7f6-10319fa3ba42"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.549259 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07261104-4428-40bf-b7f6-10319fa3ba42-kube-api-access-wzx7h" (OuterVolumeSpecName: "kube-api-access-wzx7h") pod "07261104-4428-40bf-b7f6-10319fa3ba42" (UID: "07261104-4428-40bf-b7f6-10319fa3ba42"). InnerVolumeSpecName "kube-api-access-wzx7h". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.593190 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c092b1a-4742-4813-9558-2f5b6ca34024" path="/var/lib/kubelet/pods/2c092b1a-4742-4813-9558-2f5b6ca34024/volumes" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.643612 4834 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07261104-4428-40bf-b7f6-10319fa3ba42-utilities\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.643649 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wzx7h\" (UniqueName: \"kubernetes.io/projected/07261104-4428-40bf-b7f6-10319fa3ba42-kube-api-access-wzx7h\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.669211 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07261104-4428-40bf-b7f6-10319fa3ba42-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "07261104-4428-40bf-b7f6-10319fa3ba42" (UID: "07261104-4428-40bf-b7f6-10319fa3ba42"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.745025 4834 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07261104-4428-40bf-b7f6-10319fa3ba42-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.837389 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" event={"ID":"8be5b6f3-e560-446d-afb1-2160a964a09e","Type":"ContainerStarted","Data":"0ac6b2f25d7df92a8486944e362ffa04a5a29107497e7010b319b4dfee825279"} Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.839112 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.844122 4834 generic.go:334] "Generic (PLEG): container finished" podID="07261104-4428-40bf-b7f6-10319fa3ba42" containerID="90b0e4fcda78ef25df168ca0ac260de759b7e730ca43c70daf7138d0156a14eb" exitCode=0 Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.844230 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4mxvd" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.844640 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4mxvd" event={"ID":"07261104-4428-40bf-b7f6-10319fa3ba42","Type":"ContainerDied","Data":"90b0e4fcda78ef25df168ca0ac260de759b7e730ca43c70daf7138d0156a14eb"} Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.844742 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4mxvd" event={"ID":"07261104-4428-40bf-b7f6-10319fa3ba42","Type":"ContainerDied","Data":"6661c1b803ad3bd67b4d3faeb2f86851e669c7e9cdc0ee07c960eb431c3375fa"} Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.844770 4834 scope.go:117] "RemoveContainer" containerID="90b0e4fcda78ef25df168ca0ac260de759b7e730ca43c70daf7138d0156a14eb" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.847737 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.861049 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" podStartSLOduration=3.861026903 podStartE2EDuration="3.861026903s" podCreationTimestamp="2026-02-23 09:10:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:10:48.860134558 +0000 UTC m=+184.938448955" watchObservedRunningTime="2026-02-23 09:10:48.861026903 +0000 UTC m=+184.939341290" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.867156 4834 scope.go:117] "RemoveContainer" containerID="d6b856f71c75a9770d9c5434d8106065db73356751b6209ec3341c6ec883f199" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.898427 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4mxvd"] Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.904754 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4mxvd"] Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 
09:10:48.913465 4834 scope.go:117] "RemoveContainer" containerID="277c46fb56097c4601efc79e71889d5e4dee68e9ba0349169c240f4d1dfc7a41" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.935971 4834 scope.go:117] "RemoveContainer" containerID="90b0e4fcda78ef25df168ca0ac260de759b7e730ca43c70daf7138d0156a14eb" Feb 23 09:10:48 crc kubenswrapper[4834]: E0223 09:10:48.936595 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90b0e4fcda78ef25df168ca0ac260de759b7e730ca43c70daf7138d0156a14eb\": container with ID starting with 90b0e4fcda78ef25df168ca0ac260de759b7e730ca43c70daf7138d0156a14eb not found: ID does not exist" containerID="90b0e4fcda78ef25df168ca0ac260de759b7e730ca43c70daf7138d0156a14eb" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.936678 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90b0e4fcda78ef25df168ca0ac260de759b7e730ca43c70daf7138d0156a14eb"} err="failed to get container status \"90b0e4fcda78ef25df168ca0ac260de759b7e730ca43c70daf7138d0156a14eb\": rpc error: code = NotFound desc = could not find container \"90b0e4fcda78ef25df168ca0ac260de759b7e730ca43c70daf7138d0156a14eb\": container with ID starting with 90b0e4fcda78ef25df168ca0ac260de759b7e730ca43c70daf7138d0156a14eb not found: ID does not exist" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.936711 4834 scope.go:117] "RemoveContainer" containerID="d6b856f71c75a9770d9c5434d8106065db73356751b6209ec3341c6ec883f199" Feb 23 09:10:48 crc kubenswrapper[4834]: E0223 09:10:48.936944 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6b856f71c75a9770d9c5434d8106065db73356751b6209ec3341c6ec883f199\": container with ID starting with d6b856f71c75a9770d9c5434d8106065db73356751b6209ec3341c6ec883f199 not found: ID does not exist" containerID="d6b856f71c75a9770d9c5434d8106065db73356751b6209ec3341c6ec883f199" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.936965 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6b856f71c75a9770d9c5434d8106065db73356751b6209ec3341c6ec883f199"} err="failed to get container status \"d6b856f71c75a9770d9c5434d8106065db73356751b6209ec3341c6ec883f199\": rpc error: code = NotFound desc = could not find container \"d6b856f71c75a9770d9c5434d8106065db73356751b6209ec3341c6ec883f199\": container with ID starting with d6b856f71c75a9770d9c5434d8106065db73356751b6209ec3341c6ec883f199 not found: ID does not exist" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.936977 4834 scope.go:117] "RemoveContainer" containerID="277c46fb56097c4601efc79e71889d5e4dee68e9ba0349169c240f4d1dfc7a41" Feb 23 09:10:48 crc kubenswrapper[4834]: E0223 09:10:48.937199 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"277c46fb56097c4601efc79e71889d5e4dee68e9ba0349169c240f4d1dfc7a41\": container with ID starting with 277c46fb56097c4601efc79e71889d5e4dee68e9ba0349169c240f4d1dfc7a41 not found: ID does not exist" containerID="277c46fb56097c4601efc79e71889d5e4dee68e9ba0349169c240f4d1dfc7a41" Feb 23 09:10:48 crc kubenswrapper[4834]: I0223 09:10:48.937226 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"277c46fb56097c4601efc79e71889d5e4dee68e9ba0349169c240f4d1dfc7a41"} err="failed to get container status 
\"277c46fb56097c4601efc79e71889d5e4dee68e9ba0349169c240f4d1dfc7a41\": rpc error: code = NotFound desc = could not find container \"277c46fb56097c4601efc79e71889d5e4dee68e9ba0349169c240f4d1dfc7a41\": container with ID starting with 277c46fb56097c4601efc79e71889d5e4dee68e9ba0349169c240f4d1dfc7a41 not found: ID does not exist" Feb 23 09:10:50 crc kubenswrapper[4834]: I0223 09:10:50.570583 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:10:50 crc kubenswrapper[4834]: I0223 09:10:50.570653 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:10:50 crc kubenswrapper[4834]: I0223 09:10:50.595858 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07261104-4428-40bf-b7f6-10319fa3ba42" path="/var/lib/kubelet/pods/07261104-4428-40bf-b7f6-10319fa3ba42/volumes" Feb 23 09:10:50 crc kubenswrapper[4834]: I0223 09:10:50.621317 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:10:50 crc kubenswrapper[4834]: I0223 09:10:50.803337 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:10:50 crc kubenswrapper[4834]: I0223 09:10:50.845770 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:10:50 crc kubenswrapper[4834]: I0223 09:10:50.901324 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:10:51 crc kubenswrapper[4834]: I0223 09:10:51.248390 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:10:51 crc kubenswrapper[4834]: I0223 09:10:51.248736 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:10:51 crc kubenswrapper[4834]: I0223 09:10:51.290638 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:10:51 crc kubenswrapper[4834]: I0223 09:10:51.911004 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:10:52 crc kubenswrapper[4834]: I0223 09:10:52.805726 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kk9xp"] Feb 23 09:10:53 crc kubenswrapper[4834]: I0223 09:10:53.874941 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-kk9xp" podUID="4ad50479-c17b-4e80-b57a-ef039e81c612" containerName="registry-server" containerID="cri-o://0fc60fa5822d1ceb486d7e52db5a68b0856fbe5e53317711fb009123f2ed4b6e" gracePeriod=2 Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.122531 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" podUID="a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" containerName="oauth-openshift" containerID="cri-o://8369d4e8ebce379a776a0eaa126786ab666b5c9fd5576ea4bb678328bff9c1e2" gracePeriod=15 Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.356676 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.920038 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ad50479-c17b-4e80-b57a-ef039e81c612-utilities\") pod \"4ad50479-c17b-4e80-b57a-ef039e81c612\" (UID: \"4ad50479-c17b-4e80-b57a-ef039e81c612\") " Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.920097 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ad50479-c17b-4e80-b57a-ef039e81c612-catalog-content\") pod \"4ad50479-c17b-4e80-b57a-ef039e81c612\" (UID: \"4ad50479-c17b-4e80-b57a-ef039e81c612\") " Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.920176 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ktfgk\" (UniqueName: \"kubernetes.io/projected/4ad50479-c17b-4e80-b57a-ef039e81c612-kube-api-access-ktfgk\") pod \"4ad50479-c17b-4e80-b57a-ef039e81c612\" (UID: \"4ad50479-c17b-4e80-b57a-ef039e81c612\") " Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.923975 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ad50479-c17b-4e80-b57a-ef039e81c612-utilities" (OuterVolumeSpecName: "utilities") pod "4ad50479-c17b-4e80-b57a-ef039e81c612" (UID: "4ad50479-c17b-4e80-b57a-ef039e81c612"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.927686 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4ad50479-c17b-4e80-b57a-ef039e81c612-kube-api-access-ktfgk" (OuterVolumeSpecName: "kube-api-access-ktfgk") pod "4ad50479-c17b-4e80-b57a-ef039e81c612" (UID: "4ad50479-c17b-4e80-b57a-ef039e81c612"). InnerVolumeSpecName "kube-api-access-ktfgk". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.932210 4834 generic.go:334] "Generic (PLEG): container finished" podID="4ad50479-c17b-4e80-b57a-ef039e81c612" containerID="0fc60fa5822d1ceb486d7e52db5a68b0856fbe5e53317711fb009123f2ed4b6e" exitCode=0 Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.932360 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-kk9xp" Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.933893 4834 generic.go:334] "Generic (PLEG): container finished" podID="a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" containerID="8369d4e8ebce379a776a0eaa126786ab666b5c9fd5576ea4bb678328bff9c1e2" exitCode=0 Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.943007 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kk9xp" event={"ID":"4ad50479-c17b-4e80-b57a-ef039e81c612","Type":"ContainerDied","Data":"0fc60fa5822d1ceb486d7e52db5a68b0856fbe5e53317711fb009123f2ed4b6e"} Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.943053 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-kk9xp" event={"ID":"4ad50479-c17b-4e80-b57a-ef039e81c612","Type":"ContainerDied","Data":"242c1377008eec11bb58cf77c93fef8e8fea62a361a086042e005dc9fd173fa3"} Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.943065 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" event={"ID":"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd","Type":"ContainerDied","Data":"8369d4e8ebce379a776a0eaa126786ab666b5c9fd5576ea4bb678328bff9c1e2"} Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.943090 4834 scope.go:117] "RemoveContainer" containerID="0fc60fa5822d1ceb486d7e52db5a68b0856fbe5e53317711fb009123f2ed4b6e" Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.948835 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.965230 4834 scope.go:117] "RemoveContainer" containerID="dd25652b2b1faad4278de96498f3c88e8f59fcc3bb1fe19f61ea9eb7ae95e849" Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.981326 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4ad50479-c17b-4e80-b57a-ef039e81c612-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4ad50479-c17b-4e80-b57a-ef039e81c612" (UID: "4ad50479-c17b-4e80-b57a-ef039e81c612"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:10:54 crc kubenswrapper[4834]: I0223 09:10:54.989882 4834 scope.go:117] "RemoveContainer" containerID="db7a6d1c9c649dd07afcb602b61c59c0acbf77f6f74386a72e8e38f881c2826f" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.007281 4834 scope.go:117] "RemoveContainer" containerID="0fc60fa5822d1ceb486d7e52db5a68b0856fbe5e53317711fb009123f2ed4b6e" Feb 23 09:10:55 crc kubenswrapper[4834]: E0223 09:10:55.007970 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0fc60fa5822d1ceb486d7e52db5a68b0856fbe5e53317711fb009123f2ed4b6e\": container with ID starting with 0fc60fa5822d1ceb486d7e52db5a68b0856fbe5e53317711fb009123f2ed4b6e not found: ID does not exist" containerID="0fc60fa5822d1ceb486d7e52db5a68b0856fbe5e53317711fb009123f2ed4b6e" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.008024 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0fc60fa5822d1ceb486d7e52db5a68b0856fbe5e53317711fb009123f2ed4b6e"} err="failed to get container status \"0fc60fa5822d1ceb486d7e52db5a68b0856fbe5e53317711fb009123f2ed4b6e\": rpc error: code = NotFound desc = could not find container \"0fc60fa5822d1ceb486d7e52db5a68b0856fbe5e53317711fb009123f2ed4b6e\": container with ID starting with 0fc60fa5822d1ceb486d7e52db5a68b0856fbe5e53317711fb009123f2ed4b6e not found: ID does not exist" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.008054 4834 scope.go:117] "RemoveContainer" containerID="dd25652b2b1faad4278de96498f3c88e8f59fcc3bb1fe19f61ea9eb7ae95e849" Feb 23 09:10:55 crc kubenswrapper[4834]: E0223 09:10:55.009099 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dd25652b2b1faad4278de96498f3c88e8f59fcc3bb1fe19f61ea9eb7ae95e849\": container with ID starting with dd25652b2b1faad4278de96498f3c88e8f59fcc3bb1fe19f61ea9eb7ae95e849 not found: ID does not exist" containerID="dd25652b2b1faad4278de96498f3c88e8f59fcc3bb1fe19f61ea9eb7ae95e849" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.009205 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dd25652b2b1faad4278de96498f3c88e8f59fcc3bb1fe19f61ea9eb7ae95e849"} err="failed to get container status \"dd25652b2b1faad4278de96498f3c88e8f59fcc3bb1fe19f61ea9eb7ae95e849\": rpc error: code = NotFound desc = could not find container \"dd25652b2b1faad4278de96498f3c88e8f59fcc3bb1fe19f61ea9eb7ae95e849\": container with ID starting with dd25652b2b1faad4278de96498f3c88e8f59fcc3bb1fe19f61ea9eb7ae95e849 not found: ID does not exist" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.009264 4834 scope.go:117] "RemoveContainer" containerID="db7a6d1c9c649dd07afcb602b61c59c0acbf77f6f74386a72e8e38f881c2826f" Feb 23 09:10:55 crc kubenswrapper[4834]: E0223 09:10:55.010049 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db7a6d1c9c649dd07afcb602b61c59c0acbf77f6f74386a72e8e38f881c2826f\": container with ID starting with db7a6d1c9c649dd07afcb602b61c59c0acbf77f6f74386a72e8e38f881c2826f not found: ID does not exist" containerID="db7a6d1c9c649dd07afcb602b61c59c0acbf77f6f74386a72e8e38f881c2826f" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.010124 4834 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"db7a6d1c9c649dd07afcb602b61c59c0acbf77f6f74386a72e8e38f881c2826f"} err="failed to get container status \"db7a6d1c9c649dd07afcb602b61c59c0acbf77f6f74386a72e8e38f881c2826f\": rpc error: code = NotFound desc = could not find container \"db7a6d1c9c649dd07afcb602b61c59c0acbf77f6f74386a72e8e38f881c2826f\": container with ID starting with db7a6d1c9c649dd07afcb602b61c59c0acbf77f6f74386a72e8e38f881c2826f not found: ID does not exist" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.021084 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ktfgk\" (UniqueName: \"kubernetes.io/projected/4ad50479-c17b-4e80-b57a-ef039e81c612-kube-api-access-ktfgk\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.021266 4834 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4ad50479-c17b-4e80-b57a-ef039e81c612-utilities\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.021379 4834 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4ad50479-c17b-4e80-b57a-ef039e81c612-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.121895 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-trusted-ca-bundle\") pod \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.121960 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-service-ca\") pod \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.122020 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-error\") pod \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.122052 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-session\") pod \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.122087 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-cliconfig\") pod \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.122126 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-idp-0-file-data\") pod \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\" (UID: 
\"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.122158 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-audit-policies\") pod \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.122189 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-ocp-branding-template\") pod \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.122223 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-router-certs\") pod \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.122250 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dkc6n\" (UniqueName: \"kubernetes.io/projected/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-kube-api-access-dkc6n\") pod \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.122278 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-audit-dir\") pod \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.122315 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-login\") pod \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.122354 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-serving-cert\") pod \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.122388 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-provider-selection\") pod \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\" (UID: \"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd\") " Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.123468 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" (UID: "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd"). InnerVolumeSpecName "v4-0-config-system-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.123477 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" (UID: "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.123505 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" (UID: "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.124237 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" (UID: "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.124319 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" (UID: "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.126834 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" (UID: "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.127143 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" (UID: "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.127354 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" (UID: "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.127590 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" (UID: "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.127902 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" (UID: "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.135640 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" (UID: "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.135952 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" (UID: "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.135985 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" (UID: "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.136115 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-kube-api-access-dkc6n" (OuterVolumeSpecName: "kube-api-access-dkc6n") pod "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" (UID: "a08a0a0e-319b-4021-992e-9cc5fa8ae4cd"). InnerVolumeSpecName "kube-api-access-dkc6n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.224318 4834 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.224375 4834 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.224438 4834 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.224459 4834 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.224478 4834 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.224503 4834 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.224529 4834 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.224553 4834 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.224572 4834 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-audit-policies\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.224590 4834 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.224609 4834 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.224627 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dkc6n\" (UniqueName: 
\"kubernetes.io/projected/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-kube-api-access-dkc6n\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.224644 4834 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-audit-dir\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.224661 4834 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.278425 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-kk9xp"] Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.282627 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-kk9xp"] Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.942237 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" event={"ID":"a08a0a0e-319b-4021-992e-9cc5fa8ae4cd","Type":"ContainerDied","Data":"27e8b8ca5e6a3505481dec20a7f6620271808938b332b4bae13a83932a179d72"} Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.942311 4834 scope.go:117] "RemoveContainer" containerID="8369d4e8ebce379a776a0eaa126786ab666b5c9fd5576ea4bb678328bff9c1e2" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.942337 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4p48q" Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.971988 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4p48q"] Feb 23 09:10:55 crc kubenswrapper[4834]: I0223 09:10:55.975457 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4p48q"] Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.033680 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-6bffc96f45-78nwl"] Feb 23 09:10:56 crc kubenswrapper[4834]: E0223 09:10:56.033886 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07261104-4428-40bf-b7f6-10319fa3ba42" containerName="registry-server" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.033898 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="07261104-4428-40bf-b7f6-10319fa3ba42" containerName="registry-server" Feb 23 09:10:56 crc kubenswrapper[4834]: E0223 09:10:56.033905 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ad50479-c17b-4e80-b57a-ef039e81c612" containerName="extract-content" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.033911 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ad50479-c17b-4e80-b57a-ef039e81c612" containerName="extract-content" Feb 23 09:10:56 crc kubenswrapper[4834]: E0223 09:10:56.033922 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ad50479-c17b-4e80-b57a-ef039e81c612" containerName="extract-utilities" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.033929 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ad50479-c17b-4e80-b57a-ef039e81c612" containerName="extract-utilities" Feb 23 09:10:56 crc kubenswrapper[4834]: E0223 09:10:56.033945 4834 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07261104-4428-40bf-b7f6-10319fa3ba42" containerName="extract-content" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.033953 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="07261104-4428-40bf-b7f6-10319fa3ba42" containerName="extract-content" Feb 23 09:10:56 crc kubenswrapper[4834]: E0223 09:10:56.033962 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07261104-4428-40bf-b7f6-10319fa3ba42" containerName="extract-utilities" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.033967 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="07261104-4428-40bf-b7f6-10319fa3ba42" containerName="extract-utilities" Feb 23 09:10:56 crc kubenswrapper[4834]: E0223 09:10:56.033977 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4ad50479-c17b-4e80-b57a-ef039e81c612" containerName="registry-server" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.033983 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="4ad50479-c17b-4e80-b57a-ef039e81c612" containerName="registry-server" Feb 23 09:10:56 crc kubenswrapper[4834]: E0223 09:10:56.033992 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" containerName="oauth-openshift" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.033998 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" containerName="oauth-openshift" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.034087 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" containerName="oauth-openshift" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.034097 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="4ad50479-c17b-4e80-b57a-ef039e81c612" containerName="registry-server" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.034108 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="07261104-4428-40bf-b7f6-10319fa3ba42" containerName="registry-server" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.034500 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.036360 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.036450 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.036942 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.037537 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.037826 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.037860 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.037931 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.038838 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.038932 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.039361 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.039753 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.039800 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.050525 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.052921 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.053865 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6bffc96f45-78nwl"] Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.054917 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.235557 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-user-template-login\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " 
pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.236040 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.236076 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.236104 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-audit-dir\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.236143 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-session\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.236181 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.236240 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-service-ca\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.236472 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-router-certs\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.236552 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: 
\"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.236589 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g42tt\" (UniqueName: \"kubernetes.io/projected/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-kube-api-access-g42tt\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.236683 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.236710 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-user-template-error\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.236734 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.236777 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-audit-policies\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.340761 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g42tt\" (UniqueName: \"kubernetes.io/projected/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-kube-api-access-g42tt\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.340885 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.340959 4834 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-user-template-error\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.341018 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.341052 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-audit-policies\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.341140 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-user-template-login\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.341218 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.341277 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.341339 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-audit-dir\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.341379 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-session\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.341466 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.341558 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-service-ca\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.341645 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-router-certs\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.341727 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.345774 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-audit-dir\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.345977 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-audit-policies\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.346556 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-cliconfig\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.346912 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.347086 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-service-ca\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.349331 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-user-template-error\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.349441 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-session\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.349819 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-serving-cert\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.350942 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.352590 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.353292 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-router-certs\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.353345 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-user-template-login\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.354157 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.361149 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g42tt\" (UniqueName: \"kubernetes.io/projected/28c6e3a4-cf52-4f54-a443-203ba6d2f8db-kube-api-access-g42tt\") pod \"oauth-openshift-6bffc96f45-78nwl\" (UID: \"28c6e3a4-cf52-4f54-a443-203ba6d2f8db\") " pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.595988 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4ad50479-c17b-4e80-b57a-ef039e81c612" path="/var/lib/kubelet/pods/4ad50479-c17b-4e80-b57a-ef039e81c612/volumes" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.597716 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a08a0a0e-319b-4021-992e-9cc5fa8ae4cd" path="/var/lib/kubelet/pods/a08a0a0e-319b-4021-992e-9cc5fa8ae4cd/volumes" Feb 23 09:10:56 crc kubenswrapper[4834]: I0223 09:10:56.658532 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:57 crc kubenswrapper[4834]: I0223 09:10:57.077481 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-6bffc96f45-78nwl"] Feb 23 09:10:57 crc kubenswrapper[4834]: W0223 09:10:57.221068 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod28c6e3a4_cf52_4f54_a443_203ba6d2f8db.slice/crio-2b9b71a40fc6f2baeff961cfc8bc63e292379533cd68b488638fb2c741562d9b WatchSource:0}: Error finding container 2b9b71a40fc6f2baeff961cfc8bc63e292379533cd68b488638fb2c741562d9b: Status 404 returned error can't find the container with id 2b9b71a40fc6f2baeff961cfc8bc63e292379533cd68b488638fb2c741562d9b Feb 23 09:10:57 crc kubenswrapper[4834]: I0223 09:10:57.958970 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" event={"ID":"28c6e3a4-cf52-4f54-a443-203ba6d2f8db","Type":"ContainerStarted","Data":"fad898e3e70a13c38f617cde77dd79322a0a403647fc7f0d7a6805cc59907682"} Feb 23 09:10:57 crc kubenswrapper[4834]: I0223 09:10:57.959440 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" event={"ID":"28c6e3a4-cf52-4f54-a443-203ba6d2f8db","Type":"ContainerStarted","Data":"2b9b71a40fc6f2baeff961cfc8bc63e292379533cd68b488638fb2c741562d9b"} Feb 23 09:10:57 crc kubenswrapper[4834]: I0223 09:10:57.959461 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:57 crc kubenswrapper[4834]: I0223 09:10:57.970022 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" Feb 23 09:10:57 crc kubenswrapper[4834]: I0223 09:10:57.982886 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-6bffc96f45-78nwl" podStartSLOduration=28.982860691 podStartE2EDuration="28.982860691s" podCreationTimestamp="2026-02-23 09:10:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:10:57.978834988 +0000 UTC m=+194.057149435" watchObservedRunningTime="2026-02-23 09:10:57.982860691 +0000 UTC m=+194.061175078" Feb 23 09:11:04 crc kubenswrapper[4834]: I0223 09:11:04.970074 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-8657d779d8-j47tb"] Feb 23 09:11:04 crc kubenswrapper[4834]: I0223 09:11:04.971134 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" podUID="c803df1e-f605-4117-bf40-dabc658a6821" containerName="controller-manager" containerID="cri-o://d23cf760a09a8974fb5b8e7a3ce3829778982fc2831bdddf8641d1208672f962" gracePeriod=30 Feb 23 09:11:04 crc kubenswrapper[4834]: I0223 09:11:04.981114 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh"] Feb 23 09:11:04 crc kubenswrapper[4834]: I0223 09:11:04.981342 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" podUID="8be5b6f3-e560-446d-afb1-2160a964a09e" containerName="route-controller-manager" containerID="cri-o://0ac6b2f25d7df92a8486944e362ffa04a5a29107497e7010b319b4dfee825279" gracePeriod=30 Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.506331 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.575866 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8be5b6f3-e560-446d-afb1-2160a964a09e-client-ca\") pod \"8be5b6f3-e560-446d-afb1-2160a964a09e\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.575956 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8be5b6f3-e560-446d-afb1-2160a964a09e-serving-cert\") pod \"8be5b6f3-e560-446d-afb1-2160a964a09e\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.576002 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7b5ps\" (UniqueName: \"kubernetes.io/projected/8be5b6f3-e560-446d-afb1-2160a964a09e-kube-api-access-7b5ps\") pod \"8be5b6f3-e560-446d-afb1-2160a964a09e\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.576057 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8be5b6f3-e560-446d-afb1-2160a964a09e-config\") pod \"8be5b6f3-e560-446d-afb1-2160a964a09e\" (UID: \"8be5b6f3-e560-446d-afb1-2160a964a09e\") " Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.576769 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8be5b6f3-e560-446d-afb1-2160a964a09e-client-ca" (OuterVolumeSpecName: "client-ca") pod "8be5b6f3-e560-446d-afb1-2160a964a09e" (UID: "8be5b6f3-e560-446d-afb1-2160a964a09e"). InnerVolumeSpecName "client-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.576786 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8be5b6f3-e560-446d-afb1-2160a964a09e-config" (OuterVolumeSpecName: "config") pod "8be5b6f3-e560-446d-afb1-2160a964a09e" (UID: "8be5b6f3-e560-446d-afb1-2160a964a09e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.581924 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8be5b6f3-e560-446d-afb1-2160a964a09e-kube-api-access-7b5ps" (OuterVolumeSpecName: "kube-api-access-7b5ps") pod "8be5b6f3-e560-446d-afb1-2160a964a09e" (UID: "8be5b6f3-e560-446d-afb1-2160a964a09e"). InnerVolumeSpecName "kube-api-access-7b5ps". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.591820 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8be5b6f3-e560-446d-afb1-2160a964a09e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8be5b6f3-e560-446d-afb1-2160a964a09e" (UID: "8be5b6f3-e560-446d-afb1-2160a964a09e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.628836 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.677348 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c803df1e-f605-4117-bf40-dabc658a6821-serving-cert\") pod \"c803df1e-f605-4117-bf40-dabc658a6821\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.677502 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-client-ca\") pod \"c803df1e-f605-4117-bf40-dabc658a6821\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.677583 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-config\") pod \"c803df1e-f605-4117-bf40-dabc658a6821\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.677613 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n8nck\" (UniqueName: \"kubernetes.io/projected/c803df1e-f605-4117-bf40-dabc658a6821-kube-api-access-n8nck\") pod \"c803df1e-f605-4117-bf40-dabc658a6821\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.677641 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-proxy-ca-bundles\") pod \"c803df1e-f605-4117-bf40-dabc658a6821\" (UID: \"c803df1e-f605-4117-bf40-dabc658a6821\") " Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.677954 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7b5ps\" (UniqueName: 
\"kubernetes.io/projected/8be5b6f3-e560-446d-afb1-2160a964a09e-kube-api-access-7b5ps\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.677979 4834 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8be5b6f3-e560-446d-afb1-2160a964a09e-config\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.677993 4834 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8be5b6f3-e560-446d-afb1-2160a964a09e-client-ca\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.678004 4834 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8be5b6f3-e560-446d-afb1-2160a964a09e-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.678150 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-client-ca" (OuterVolumeSpecName: "client-ca") pod "c803df1e-f605-4117-bf40-dabc658a6821" (UID: "c803df1e-f605-4117-bf40-dabc658a6821"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.678261 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "c803df1e-f605-4117-bf40-dabc658a6821" (UID: "c803df1e-f605-4117-bf40-dabc658a6821"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.678301 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-config" (OuterVolumeSpecName: "config") pod "c803df1e-f605-4117-bf40-dabc658a6821" (UID: "c803df1e-f605-4117-bf40-dabc658a6821"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.680343 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c803df1e-f605-4117-bf40-dabc658a6821-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c803df1e-f605-4117-bf40-dabc658a6821" (UID: "c803df1e-f605-4117-bf40-dabc658a6821"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.680375 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c803df1e-f605-4117-bf40-dabc658a6821-kube-api-access-n8nck" (OuterVolumeSpecName: "kube-api-access-n8nck") pod "c803df1e-f605-4117-bf40-dabc658a6821" (UID: "c803df1e-f605-4117-bf40-dabc658a6821"). InnerVolumeSpecName "kube-api-access-n8nck". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.779066 4834 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c803df1e-f605-4117-bf40-dabc658a6821-serving-cert\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.779100 4834 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-client-ca\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.779112 4834 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-config\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.779123 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n8nck\" (UniqueName: \"kubernetes.io/projected/c803df1e-f605-4117-bf40-dabc658a6821-kube-api-access-n8nck\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:05 crc kubenswrapper[4834]: I0223 09:11:05.779133 4834 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c803df1e-f605-4117-bf40-dabc658a6821-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.025972 4834 generic.go:334] "Generic (PLEG): container finished" podID="c803df1e-f605-4117-bf40-dabc658a6821" containerID="d23cf760a09a8974fb5b8e7a3ce3829778982fc2831bdddf8641d1208672f962" exitCode=0 Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.026037 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.026054 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" event={"ID":"c803df1e-f605-4117-bf40-dabc658a6821","Type":"ContainerDied","Data":"d23cf760a09a8974fb5b8e7a3ce3829778982fc2831bdddf8641d1208672f962"} Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.026178 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-8657d779d8-j47tb" event={"ID":"c803df1e-f605-4117-bf40-dabc658a6821","Type":"ContainerDied","Data":"d2f6a119fc17a1f326b87c589062185b49c0023508a911e2e7f4291a4f0aa294"} Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.026201 4834 scope.go:117] "RemoveContainer" containerID="d23cf760a09a8974fb5b8e7a3ce3829778982fc2831bdddf8641d1208672f962" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.028620 4834 generic.go:334] "Generic (PLEG): container finished" podID="8be5b6f3-e560-446d-afb1-2160a964a09e" containerID="0ac6b2f25d7df92a8486944e362ffa04a5a29107497e7010b319b4dfee825279" exitCode=0 Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.028667 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" event={"ID":"8be5b6f3-e560-446d-afb1-2160a964a09e","Type":"ContainerDied","Data":"0ac6b2f25d7df92a8486944e362ffa04a5a29107497e7010b319b4dfee825279"} Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.028680 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.028700 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh" event={"ID":"8be5b6f3-e560-446d-afb1-2160a964a09e","Type":"ContainerDied","Data":"462fa6c40747949678b926be50c9abc27404cfddc5fdbe6787eb182d3921aebf"} Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.035859 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-644d8f66fd-mc6m9"] Feb 23 09:11:06 crc kubenswrapper[4834]: E0223 09:11:06.036100 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8be5b6f3-e560-446d-afb1-2160a964a09e" containerName="route-controller-manager" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.036115 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="8be5b6f3-e560-446d-afb1-2160a964a09e" containerName="route-controller-manager" Feb 23 09:11:06 crc kubenswrapper[4834]: E0223 09:11:06.036130 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c803df1e-f605-4117-bf40-dabc658a6821" containerName="controller-manager" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.036138 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="c803df1e-f605-4117-bf40-dabc658a6821" containerName="controller-manager" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.036249 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="c803df1e-f605-4117-bf40-dabc658a6821" containerName="controller-manager" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.036264 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="8be5b6f3-e560-446d-afb1-2160a964a09e" containerName="route-controller-manager" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.036647 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.039118 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.040003 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.040077 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.040140 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.040386 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.043869 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.054626 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.059723 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-644d8f66fd-mc6m9"] Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.063314 4834 scope.go:117] "RemoveContainer" containerID="d23cf760a09a8974fb5b8e7a3ce3829778982fc2831bdddf8641d1208672f962" Feb 23 09:11:06 crc kubenswrapper[4834]: E0223 09:11:06.069931 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d23cf760a09a8974fb5b8e7a3ce3829778982fc2831bdddf8641d1208672f962\": container with ID starting with d23cf760a09a8974fb5b8e7a3ce3829778982fc2831bdddf8641d1208672f962 not found: ID does not exist" containerID="d23cf760a09a8974fb5b8e7a3ce3829778982fc2831bdddf8641d1208672f962" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.069980 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d23cf760a09a8974fb5b8e7a3ce3829778982fc2831bdddf8641d1208672f962"} err="failed to get container status \"d23cf760a09a8974fb5b8e7a3ce3829778982fc2831bdddf8641d1208672f962\": rpc error: code = NotFound desc = could not find container \"d23cf760a09a8974fb5b8e7a3ce3829778982fc2831bdddf8641d1208672f962\": container with ID starting with d23cf760a09a8974fb5b8e7a3ce3829778982fc2831bdddf8641d1208672f962 not found: ID does not exist" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.070010 4834 scope.go:117] "RemoveContainer" containerID="0ac6b2f25d7df92a8486944e362ffa04a5a29107497e7010b319b4dfee825279" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.082878 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c739f358-d8fc-4885-8e91-54da12743c41-proxy-ca-bundles\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.082960 4834 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c739f358-d8fc-4885-8e91-54da12743c41-client-ca\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.082986 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c739f358-d8fc-4885-8e91-54da12743c41-config\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.083016 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vgtq\" (UniqueName: \"kubernetes.io/projected/c739f358-d8fc-4885-8e91-54da12743c41-kube-api-access-6vgtq\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.083070 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c739f358-d8fc-4885-8e91-54da12743c41-serving-cert\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.084697 4834 scope.go:117] "RemoveContainer" containerID="0ac6b2f25d7df92a8486944e362ffa04a5a29107497e7010b319b4dfee825279" Feb 23 09:11:06 crc kubenswrapper[4834]: E0223 09:11:06.085035 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0ac6b2f25d7df92a8486944e362ffa04a5a29107497e7010b319b4dfee825279\": container with ID starting with 0ac6b2f25d7df92a8486944e362ffa04a5a29107497e7010b319b4dfee825279 not found: ID does not exist" containerID="0ac6b2f25d7df92a8486944e362ffa04a5a29107497e7010b319b4dfee825279" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.085067 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0ac6b2f25d7df92a8486944e362ffa04a5a29107497e7010b319b4dfee825279"} err="failed to get container status \"0ac6b2f25d7df92a8486944e362ffa04a5a29107497e7010b319b4dfee825279\": rpc error: code = NotFound desc = could not find container \"0ac6b2f25d7df92a8486944e362ffa04a5a29107497e7010b319b4dfee825279\": container with ID starting with 0ac6b2f25d7df92a8486944e362ffa04a5a29107497e7010b319b4dfee825279 not found: ID does not exist" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.095038 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh"] Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.106180 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5775d66fc8-btxjh"] Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.108627 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-8657d779d8-j47tb"] Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.111721 4834 
kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-8657d779d8-j47tb"] Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.184067 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c739f358-d8fc-4885-8e91-54da12743c41-config\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.184360 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vgtq\" (UniqueName: \"kubernetes.io/projected/c739f358-d8fc-4885-8e91-54da12743c41-kube-api-access-6vgtq\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.184508 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c739f358-d8fc-4885-8e91-54da12743c41-serving-cert\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.184616 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c739f358-d8fc-4885-8e91-54da12743c41-proxy-ca-bundles\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.184686 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c739f358-d8fc-4885-8e91-54da12743c41-client-ca\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.199138 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c739f358-d8fc-4885-8e91-54da12743c41-config\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.200327 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/c739f358-d8fc-4885-8e91-54da12743c41-proxy-ca-bundles\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.203221 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c739f358-d8fc-4885-8e91-54da12743c41-client-ca\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.203766 4834 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c739f358-d8fc-4885-8e91-54da12743c41-serving-cert\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.207227 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vgtq\" (UniqueName: \"kubernetes.io/projected/c739f358-d8fc-4885-8e91-54da12743c41-kube-api-access-6vgtq\") pod \"controller-manager-644d8f66fd-mc6m9\" (UID: \"c739f358-d8fc-4885-8e91-54da12743c41\") " pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.364787 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.594041 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8be5b6f3-e560-446d-afb1-2160a964a09e" path="/var/lib/kubelet/pods/8be5b6f3-e560-446d-afb1-2160a964a09e/volumes" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.595156 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c803df1e-f605-4117-bf40-dabc658a6821" path="/var/lib/kubelet/pods/c803df1e-f605-4117-bf40-dabc658a6821/volumes" Feb 23 09:11:06 crc kubenswrapper[4834]: I0223 09:11:06.796306 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-644d8f66fd-mc6m9"] Feb 23 09:11:06 crc kubenswrapper[4834]: W0223 09:11:06.802486 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc739f358_d8fc_4885_8e91_54da12743c41.slice/crio-d69bdb9811db9bf7784b72b2f521000e50b41a8e94dbfd033317db6d9f6f4d23 WatchSource:0}: Error finding container d69bdb9811db9bf7784b72b2f521000e50b41a8e94dbfd033317db6d9f6f4d23: Status 404 returned error can't find the container with id d69bdb9811db9bf7784b72b2f521000e50b41a8e94dbfd033317db6d9f6f4d23 Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.036962 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw"] Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.037725 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.041242 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" event={"ID":"c739f358-d8fc-4885-8e91-54da12743c41","Type":"ContainerStarted","Data":"24afa06de8d72e0cdcb59eb1427854588a12077ad8b8e1d81abe31fb8ec3c916"} Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.041298 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" event={"ID":"c739f358-d8fc-4885-8e91-54da12743c41","Type":"ContainerStarted","Data":"d69bdb9811db9bf7784b72b2f521000e50b41a8e94dbfd033317db6d9f6f4d23"} Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.042429 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.042711 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.042792 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.042854 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.043149 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.043371 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.043608 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.049736 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.056301 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw"] Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.095273 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcsfb\" (UniqueName: \"kubernetes.io/projected/2d571693-6e87-4f7a-8b9e-b320ffef7b26-kube-api-access-xcsfb\") pod \"route-controller-manager-b787d84c8-rdjnw\" (UID: \"2d571693-6e87-4f7a-8b9e-b320ffef7b26\") " pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.095427 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d571693-6e87-4f7a-8b9e-b320ffef7b26-serving-cert\") pod \"route-controller-manager-b787d84c8-rdjnw\" (UID: \"2d571693-6e87-4f7a-8b9e-b320ffef7b26\") " pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.095452 4834 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d571693-6e87-4f7a-8b9e-b320ffef7b26-config\") pod \"route-controller-manager-b787d84c8-rdjnw\" (UID: \"2d571693-6e87-4f7a-8b9e-b320ffef7b26\") " pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.095473 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2d571693-6e87-4f7a-8b9e-b320ffef7b26-client-ca\") pod \"route-controller-manager-b787d84c8-rdjnw\" (UID: \"2d571693-6e87-4f7a-8b9e-b320ffef7b26\") " pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.196426 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d571693-6e87-4f7a-8b9e-b320ffef7b26-serving-cert\") pod \"route-controller-manager-b787d84c8-rdjnw\" (UID: \"2d571693-6e87-4f7a-8b9e-b320ffef7b26\") " pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.196503 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d571693-6e87-4f7a-8b9e-b320ffef7b26-config\") pod \"route-controller-manager-b787d84c8-rdjnw\" (UID: \"2d571693-6e87-4f7a-8b9e-b320ffef7b26\") " pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.196542 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2d571693-6e87-4f7a-8b9e-b320ffef7b26-client-ca\") pod \"route-controller-manager-b787d84c8-rdjnw\" (UID: \"2d571693-6e87-4f7a-8b9e-b320ffef7b26\") " pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.196603 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcsfb\" (UniqueName: \"kubernetes.io/projected/2d571693-6e87-4f7a-8b9e-b320ffef7b26-kube-api-access-xcsfb\") pod \"route-controller-manager-b787d84c8-rdjnw\" (UID: \"2d571693-6e87-4f7a-8b9e-b320ffef7b26\") " pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.197875 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d571693-6e87-4f7a-8b9e-b320ffef7b26-config\") pod \"route-controller-manager-b787d84c8-rdjnw\" (UID: \"2d571693-6e87-4f7a-8b9e-b320ffef7b26\") " pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.198012 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/2d571693-6e87-4f7a-8b9e-b320ffef7b26-client-ca\") pod \"route-controller-manager-b787d84c8-rdjnw\" (UID: \"2d571693-6e87-4f7a-8b9e-b320ffef7b26\") " pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.202156 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/2d571693-6e87-4f7a-8b9e-b320ffef7b26-serving-cert\") pod \"route-controller-manager-b787d84c8-rdjnw\" (UID: \"2d571693-6e87-4f7a-8b9e-b320ffef7b26\") " pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.223111 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcsfb\" (UniqueName: \"kubernetes.io/projected/2d571693-6e87-4f7a-8b9e-b320ffef7b26-kube-api-access-xcsfb\") pod \"route-controller-manager-b787d84c8-rdjnw\" (UID: \"2d571693-6e87-4f7a-8b9e-b320ffef7b26\") " pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.352469 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.553274 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-644d8f66fd-mc6m9" podStartSLOduration=3.553254045 podStartE2EDuration="3.553254045s" podCreationTimestamp="2026-02-23 09:11:04 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:11:07.092759109 +0000 UTC m=+203.171073506" watchObservedRunningTime="2026-02-23 09:11:07.553254045 +0000 UTC m=+203.631568422" Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.555690 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw"] Feb 23 09:11:07 crc kubenswrapper[4834]: W0223 09:11:07.558231 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d571693_6e87_4f7a_8b9e_b320ffef7b26.slice/crio-b6ad10837b87b464eeba93d631ecd499f2ecbb0595bde67e9020b657513b3709 WatchSource:0}: Error finding container b6ad10837b87b464eeba93d631ecd499f2ecbb0595bde67e9020b657513b3709: Status 404 returned error can't find the container with id b6ad10837b87b464eeba93d631ecd499f2ecbb0595bde67e9020b657513b3709 Feb 23 09:11:07 crc kubenswrapper[4834]: I0223 09:11:07.630240 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Feb 23 09:11:08 crc kubenswrapper[4834]: I0223 09:11:08.053255 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" event={"ID":"2d571693-6e87-4f7a-8b9e-b320ffef7b26","Type":"ContainerStarted","Data":"3a023bf6fcf09af6624ee0e399d0682ee0e3c9755a6101e40f316ea1a63b0c46"} Feb 23 09:11:08 crc kubenswrapper[4834]: I0223 09:11:08.053628 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" event={"ID":"2d571693-6e87-4f7a-8b9e-b320ffef7b26","Type":"ContainerStarted","Data":"b6ad10837b87b464eeba93d631ecd499f2ecbb0595bde67e9020b657513b3709"} Feb 23 09:11:08 crc kubenswrapper[4834]: I0223 09:11:08.071474 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" podStartSLOduration=3.071458717 podStartE2EDuration="3.071458717s" podCreationTimestamp="2026-02-23 09:11:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:11:08.069058899 +0000 UTC m=+204.147373286" watchObservedRunningTime="2026-02-23 09:11:08.071458717 +0000 UTC m=+204.149773104" Feb 23 09:11:09 crc kubenswrapper[4834]: I0223 09:11:09.058261 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:09 crc kubenswrapper[4834]: I0223 09:11:09.062658 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-b787d84c8-rdjnw" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.741851 4834 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.743943 4834 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744045 4834 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744060 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: E0223 09:11:11.744304 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744363 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://e88acae3be30837dd0bb120eebb551d0d75cff7fb45c39c26d6544ee7f576778" gracePeriod=15 Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744417 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://06c52566aed1d53aa400ebba7eb5642d90291f5e0a712fe950ba434a8bd2c342" gracePeriod=15 Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744426 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://c99b854342f23f595ebbf00de97b11b8a4bbd92e1bbcfcb876dd8d8139f84b15" gracePeriod=15 Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744373 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744318 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://fe1ece38b380cdd99d8c323c271e3753fdacdbbfb65f9ea0f7d46a4b99443ea7" gracePeriod=15 Feb 23 09:11:11 crc kubenswrapper[4834]: E0223 09:11:11.744509 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 
09:11:11.744526 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: E0223 09:11:11.744548 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744557 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Feb 23 09:11:11 crc kubenswrapper[4834]: E0223 09:11:11.744571 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744579 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: E0223 09:11:11.744598 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744604 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Feb 23 09:11:11 crc kubenswrapper[4834]: E0223 09:11:11.744616 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744622 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Feb 23 09:11:11 crc kubenswrapper[4834]: E0223 09:11:11.744649 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744655 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Feb 23 09:11:11 crc kubenswrapper[4834]: E0223 09:11:11.744670 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744675 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744836 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744845 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744852 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744861 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744875 4834 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744886 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.744898 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Feb 23 09:11:11 crc kubenswrapper[4834]: E0223 09:11:11.745016 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.745026 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: E0223 09:11:11.745034 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.745042 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.745124 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.745285 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.745546 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://828b449e3bfa815231a28f30a1f8d4360fc21abd57f0f5fbb2297c41a7116189" gracePeriod=15 Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.752105 4834 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.782737 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.868070 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.868112 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.868150 4834 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.868167 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.868195 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.868211 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.868233 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.868250 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970051 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970104 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970108 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970140 4834 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970180 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970207 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970244 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970270 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970297 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970301 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970320 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970324 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970353 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970302 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970328 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:11 crc kubenswrapper[4834]: I0223 09:11:11.970410 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:12 crc kubenswrapper[4834]: I0223 09:11:12.077291 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/3.log" Feb 23 09:11:12 crc kubenswrapper[4834]: I0223 09:11:12.078790 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 23 09:11:12 crc kubenswrapper[4834]: I0223 09:11:12.079429 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:11:12 crc kubenswrapper[4834]: I0223 09:11:12.079536 4834 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="e88acae3be30837dd0bb120eebb551d0d75cff7fb45c39c26d6544ee7f576778" exitCode=0 Feb 23 09:11:12 crc kubenswrapper[4834]: I0223 09:11:12.079561 4834 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="c99b854342f23f595ebbf00de97b11b8a4bbd92e1bbcfcb876dd8d8139f84b15" exitCode=0 Feb 23 09:11:12 crc kubenswrapper[4834]: I0223 09:11:12.079573 4834 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="828b449e3bfa815231a28f30a1f8d4360fc21abd57f0f5fbb2297c41a7116189" exitCode=0 Feb 23 09:11:12 crc kubenswrapper[4834]: I0223 09:11:12.079583 4834 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="06c52566aed1d53aa400ebba7eb5642d90291f5e0a712fe950ba434a8bd2c342" exitCode=2 Feb 23 09:11:12 crc kubenswrapper[4834]: I0223 09:11:12.079589 4834 scope.go:117] "RemoveContainer" containerID="081a2d265347d4b3dd1e8e1d267b680aade400f4b0b6a1d93637b329bf67541d" Feb 23 09:11:12 crc kubenswrapper[4834]: I0223 09:11:12.081976 4834 generic.go:334] "Generic (PLEG): container finished" podID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" containerID="51bd37d106375d5cf564364c93d4c90bf980e512a45d0e263861e0740d3ffc92" exitCode=0 Feb 23 09:11:12 crc kubenswrapper[4834]: I0223 09:11:12.082016 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"71f4a070-36cd-4f2b-8b37-1f93aeaee935","Type":"ContainerDied","Data":"51bd37d106375d5cf564364c93d4c90bf980e512a45d0e263861e0740d3ffc92"} Feb 23 09:11:12 crc kubenswrapper[4834]: I0223 09:11:12.082766 4834 status_manager.go:851] "Failed to get status for pod" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:12 crc kubenswrapper[4834]: I0223 09:11:12.082993 4834 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:12 crc kubenswrapper[4834]: W0223 09:11:12.109592 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-e35d6261c739ba4c5f57434c2d70b66a6bd7778d4f7b76bbefb89252ce5a8e82 WatchSource:0}: Error finding container e35d6261c739ba4c5f57434c2d70b66a6bd7778d4f7b76bbefb89252ce5a8e82: Status 404 returned error can't find the container with id e35d6261c739ba4c5f57434c2d70b66a6bd7778d4f7b76bbefb89252ce5a8e82 Feb 23 09:11:12 crc kubenswrapper[4834]: E0223 09:11:12.113444 4834 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.150:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.1896d52836dfac62 
openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-23 09:11:12.112905314 +0000 UTC m=+208.191219701,LastTimestamp:2026-02-23 09:11:12.112905314 +0000 UTC m=+208.191219701,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.093235 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"3aeb849089017f7cc033417e2973ea0906eb4949d1c4ccaadabc8922acee6f4f"} Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.093598 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"e35d6261c739ba4c5f57434c2d70b66a6bd7778d4f7b76bbefb89252ce5a8e82"} Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.094191 4834 status_manager.go:851] "Failed to get status for pod" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.094432 4834 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.096982 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 23 09:11:13 crc kubenswrapper[4834]: E0223 09:11:13.424824 4834 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:13 crc kubenswrapper[4834]: E0223 09:11:13.425469 4834 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:13 crc kubenswrapper[4834]: E0223 09:11:13.425897 4834 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:13 crc kubenswrapper[4834]: E0223 09:11:13.426635 4834 controller.go:195] "Failed to update lease" err="Put 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:13 crc kubenswrapper[4834]: E0223 09:11:13.427065 4834 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.427120 4834 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Feb 23 09:11:13 crc kubenswrapper[4834]: E0223 09:11:13.427519 4834 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.150:6443: connect: connection refused" interval="200ms" Feb 23 09:11:13 crc kubenswrapper[4834]: E0223 09:11:13.629252 4834 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.150:6443: connect: connection refused" interval="400ms" Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.757976 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.758610 4834 status_manager.go:851] "Failed to get status for pod" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.759048 4834 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.792321 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/71f4a070-36cd-4f2b-8b37-1f93aeaee935-kubelet-dir\") pod \"71f4a070-36cd-4f2b-8b37-1f93aeaee935\" (UID: \"71f4a070-36cd-4f2b-8b37-1f93aeaee935\") " Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.792837 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71f4a070-36cd-4f2b-8b37-1f93aeaee935-kube-api-access\") pod \"71f4a070-36cd-4f2b-8b37-1f93aeaee935\" (UID: \"71f4a070-36cd-4f2b-8b37-1f93aeaee935\") " Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.792474 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/71f4a070-36cd-4f2b-8b37-1f93aeaee935-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "71f4a070-36cd-4f2b-8b37-1f93aeaee935" (UID: "71f4a070-36cd-4f2b-8b37-1f93aeaee935"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.793073 4834 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/71f4a070-36cd-4f2b-8b37-1f93aeaee935-kubelet-dir\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.801346 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/71f4a070-36cd-4f2b-8b37-1f93aeaee935-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "71f4a070-36cd-4f2b-8b37-1f93aeaee935" (UID: "71f4a070-36cd-4f2b-8b37-1f93aeaee935"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.893438 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/71f4a070-36cd-4f2b-8b37-1f93aeaee935-var-lock\") pod \"71f4a070-36cd-4f2b-8b37-1f93aeaee935\" (UID: \"71f4a070-36cd-4f2b-8b37-1f93aeaee935\") " Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.893780 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/71f4a070-36cd-4f2b-8b37-1f93aeaee935-kube-api-access\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.893944 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/71f4a070-36cd-4f2b-8b37-1f93aeaee935-var-lock" (OuterVolumeSpecName: "var-lock") pod "71f4a070-36cd-4f2b-8b37-1f93aeaee935" (UID: "71f4a070-36cd-4f2b-8b37-1f93aeaee935"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:11:13 crc kubenswrapper[4834]: I0223 09:11:13.994651 4834 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/71f4a070-36cd-4f2b-8b37-1f93aeaee935-var-lock\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:14 crc kubenswrapper[4834]: E0223 09:11:14.030595 4834 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.150:6443: connect: connection refused" interval="800ms" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.106556 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"71f4a070-36cd-4f2b-8b37-1f93aeaee935","Type":"ContainerDied","Data":"7e0d4567441a82cdacd9e014fd74c143b5cf816c6fc97097626a13cbb59fce65"} Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.106602 4834 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e0d4567441a82cdacd9e014fd74c143b5cf816c6fc97097626a13cbb59fce65" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.106663 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.118883 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.119630 4834 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="fe1ece38b380cdd99d8c323c271e3753fdacdbbfb65f9ea0f7d46a4b99443ea7" exitCode=0 Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.147416 4834 status_manager.go:851] "Failed to get status for pod" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.148225 4834 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.150058 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.150802 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.151131 4834 status_manager.go:851] "Failed to get status for pod" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.151440 4834 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.151755 4834 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.297617 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.297678 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: 
\"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.297729 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.297829 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.297844 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.297934 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.298280 4834 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.298303 4834 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.298313 4834 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.587472 4834 status_manager.go:851] "Failed to get status for pod" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.588033 4834 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.588379 4834 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:14 crc kubenswrapper[4834]: I0223 09:11:14.591720 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Feb 23 09:11:14 crc kubenswrapper[4834]: E0223 09:11:14.830978 4834 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.150:6443: connect: connection refused" interval="1.6s" Feb 23 09:11:15 crc kubenswrapper[4834]: I0223 09:11:15.126866 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Feb 23 09:11:15 crc kubenswrapper[4834]: I0223 09:11:15.128070 4834 scope.go:117] "RemoveContainer" containerID="e88acae3be30837dd0bb120eebb551d0d75cff7fb45c39c26d6544ee7f576778" Feb 23 09:11:15 crc kubenswrapper[4834]: I0223 09:11:15.128101 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:15 crc kubenswrapper[4834]: I0223 09:11:15.128802 4834 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:15 crc kubenswrapper[4834]: I0223 09:11:15.129091 4834 status_manager.go:851] "Failed to get status for pod" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:15 crc kubenswrapper[4834]: I0223 09:11:15.129993 4834 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:15 crc kubenswrapper[4834]: I0223 09:11:15.130832 4834 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:15 crc kubenswrapper[4834]: I0223 09:11:15.131060 4834 status_manager.go:851] "Failed to get status for pod" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:15 crc kubenswrapper[4834]: I0223 09:11:15.132312 4834 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:15 crc kubenswrapper[4834]: I0223 09:11:15.144866 4834 scope.go:117] "RemoveContainer" containerID="c99b854342f23f595ebbf00de97b11b8a4bbd92e1bbcfcb876dd8d8139f84b15" Feb 23 09:11:15 crc kubenswrapper[4834]: I0223 09:11:15.158407 4834 scope.go:117] "RemoveContainer" containerID="828b449e3bfa815231a28f30a1f8d4360fc21abd57f0f5fbb2297c41a7116189" Feb 23 09:11:15 crc kubenswrapper[4834]: I0223 09:11:15.170806 4834 scope.go:117] "RemoveContainer" containerID="06c52566aed1d53aa400ebba7eb5642d90291f5e0a712fe950ba434a8bd2c342" Feb 23 09:11:15 crc kubenswrapper[4834]: I0223 09:11:15.184005 4834 scope.go:117] "RemoveContainer" containerID="fe1ece38b380cdd99d8c323c271e3753fdacdbbfb65f9ea0f7d46a4b99443ea7" Feb 23 09:11:15 crc kubenswrapper[4834]: I0223 09:11:15.198540 4834 scope.go:117] "RemoveContainer" containerID="f4b17ba16699f7a9821a75a96d4a02c4dc42407364e0b19539aa0b5c945f5741" Feb 23 09:11:16 crc kubenswrapper[4834]: E0223 09:11:16.431868 4834 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.150:6443: connect: connection refused" interval="3.2s" Feb 23 09:11:18 crc kubenswrapper[4834]: E0223 09:11:18.707055 4834 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/events\": dial tcp 38.102.83.150:6443: connect: connection refused" event="&Event{ObjectMeta:{kube-apiserver-startup-monitor-crc.1896d52836dfac62 openshift-kube-apiserver 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-kube-apiserver,Name:kube-apiserver-startup-monitor-crc,UID:f85e55b1a89d02b0cb034b1ea31ed45a,APIVersion:v1,ResourceVersion:,FieldPath:spec.containers{startup-monitor},},Reason:Pulled,Message:Container image \"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\" already present on machine,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-02-23 09:11:12.112905314 +0000 UTC m=+208.191219701,LastTimestamp:2026-02-23 09:11:12.112905314 +0000 UTC m=+208.191219701,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Feb 23 09:11:19 crc kubenswrapper[4834]: E0223 09:11:19.633557 4834 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.150:6443: connect: connection refused" interval="6.4s" Feb 23 09:11:24 crc kubenswrapper[4834]: I0223 09:11:24.590358 4834 status_manager.go:851] "Failed to get status for pod" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:24 crc kubenswrapper[4834]: I0223 09:11:24.590871 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:24 crc kubenswrapper[4834]: I0223 09:11:24.591183 4834 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:24 crc kubenswrapper[4834]: I0223 09:11:24.591684 4834 status_manager.go:851] "Failed to get status for pod" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:24 crc kubenswrapper[4834]: I0223 09:11:24.592196 4834 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:24 crc kubenswrapper[4834]: I0223 09:11:24.605252 4834 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f86d8e6-b3d5-402a-9f9b-568ac673d63c" Feb 23 09:11:24 crc kubenswrapper[4834]: I0223 09:11:24.605299 4834 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f86d8e6-b3d5-402a-9f9b-568ac673d63c" Feb 23 09:11:24 crc kubenswrapper[4834]: E0223 09:11:24.605957 4834 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:24 crc kubenswrapper[4834]: I0223 09:11:24.606667 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.185240 4834 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="fb46130fd69d47617f2704d1d5ca152904861b95c2788bd50698b3640ac5d7b1" exitCode=0 Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.186000 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"fb46130fd69d47617f2704d1d5ca152904861b95c2788bd50698b3640ac5d7b1"} Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.186079 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"206338a8f6b179bb1ea63059f923d632dc919bfa301b53203684d322d530b6c5"} Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.188129 4834 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f86d8e6-b3d5-402a-9f9b-568ac673d63c" Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.188190 4834 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f86d8e6-b3d5-402a-9f9b-568ac673d63c" Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.188681 4834 status_manager.go:851] "Failed to get status for pod" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.188980 4834 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:25 crc kubenswrapper[4834]: E0223 09:11:25.189178 4834 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.189982 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/cluster-policy-controller/1.log" Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.192916 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.193017 4834 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="dad79ff6d9ba6a64b783692355dbf89a9f943e4080425ef17429c36c36b989e3" exitCode=1 Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.193081 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"dad79ff6d9ba6a64b783692355dbf89a9f943e4080425ef17429c36c36b989e3"} Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.193970 4834 scope.go:117] "RemoveContainer" containerID="dad79ff6d9ba6a64b783692355dbf89a9f943e4080425ef17429c36c36b989e3" Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.194326 4834 status_manager.go:851] "Failed to get status for pod" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.194653 4834 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:25 crc kubenswrapper[4834]: I0223 09:11:25.195015 4834 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.150:6443: connect: connection refused" Feb 23 09:11:26 crc kubenswrapper[4834]: I0223 09:11:26.200511 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/cluster-policy-controller/1.log" Feb 23 09:11:26 crc kubenswrapper[4834]: I0223 09:11:26.202930 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Feb 23 09:11:26 crc kubenswrapper[4834]: I0223 09:11:26.202986 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"3f8f87a0b7af87125f462841603e938f4a414f188360a275277256a951668dda"} Feb 23 09:11:26 crc kubenswrapper[4834]: I0223 09:11:26.206512 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"f5e560385414e2d1676f2afb125dd0ef6717444b4226b3d44e1e691c940c0fd5"} Feb 23 09:11:26 crc kubenswrapper[4834]: I0223 09:11:26.206538 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"60492d8f0054849002068233962f5a9f6477f22ed4d05d2061319c116e093e8d"} Feb 23 09:11:26 crc kubenswrapper[4834]: I0223 09:11:26.206550 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"907d3cac844bdd2b2d8878e6bd1c6f87a167464103039a93fd6348e78d0c08b9"} Feb 23 09:11:26 crc kubenswrapper[4834]: I0223 09:11:26.206559 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"3db4febb8fd7b0f5d92853629cd2cc7ad01df83cd04029c25be0b4b9aa59c721"} Feb 23 09:11:27 crc kubenswrapper[4834]: I0223 09:11:27.214495 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"170ccb22f36ebc974b2756d3444aab03b22b202c09f79ef33e8797bb82f56eaf"} Feb 23 09:11:27 crc kubenswrapper[4834]: I0223 09:11:27.214708 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:27 crc kubenswrapper[4834]: I0223 09:11:27.214756 4834 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f86d8e6-b3d5-402a-9f9b-568ac673d63c" Feb 23 09:11:27 crc kubenswrapper[4834]: I0223 09:11:27.214782 4834 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f86d8e6-b3d5-402a-9f9b-568ac673d63c" Feb 23 09:11:27 crc kubenswrapper[4834]: I0223 09:11:27.810357 4834 patch_prober.go:28] interesting pod/machine-config-daemon-kt9lp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 23 09:11:27 crc kubenswrapper[4834]: I0223 09:11:27.810458 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 23 09:11:29 crc kubenswrapper[4834]: I0223 09:11:29.607448 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:29 crc kubenswrapper[4834]: I0223 09:11:29.607508 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:29 crc kubenswrapper[4834]: I0223 09:11:29.613459 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:32 crc kubenswrapper[4834]: I0223 09:11:32.226997 4834 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:33 crc kubenswrapper[4834]: I0223 09:11:33.245744 4834 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f86d8e6-b3d5-402a-9f9b-568ac673d63c" Feb 23 09:11:33 crc kubenswrapper[4834]: I0223 09:11:33.245809 4834 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f86d8e6-b3d5-402a-9f9b-568ac673d63c" Feb 23 09:11:33 crc kubenswrapper[4834]: I0223 09:11:33.249580 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:33 crc kubenswrapper[4834]: I0223 09:11:33.263178 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 23 09:11:34 crc kubenswrapper[4834]: I0223 09:11:34.252983 4834 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f86d8e6-b3d5-402a-9f9b-568ac673d63c" Feb 
23 09:11:34 crc kubenswrapper[4834]: I0223 09:11:34.253310 4834 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="1f86d8e6-b3d5-402a-9f9b-568ac673d63c" Feb 23 09:11:34 crc kubenswrapper[4834]: I0223 09:11:34.604437 4834 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="1618d60a-40f9-4ead-bedc-b9f401dffce2" Feb 23 09:11:35 crc kubenswrapper[4834]: I0223 09:11:35.013479 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 23 09:11:35 crc kubenswrapper[4834]: I0223 09:11:35.017720 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 23 09:11:35 crc kubenswrapper[4834]: I0223 09:11:35.267799 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Feb 23 09:11:42 crc kubenswrapper[4834]: I0223 09:11:42.121337 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Feb 23 09:11:42 crc kubenswrapper[4834]: I0223 09:11:42.141522 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Feb 23 09:11:42 crc kubenswrapper[4834]: I0223 09:11:42.388490 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Feb 23 09:11:42 crc kubenswrapper[4834]: I0223 09:11:42.466899 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Feb 23 09:11:42 crc kubenswrapper[4834]: I0223 09:11:42.947096 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Feb 23 09:11:43 crc kubenswrapper[4834]: I0223 09:11:43.499113 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Feb 23 09:11:43 crc kubenswrapper[4834]: I0223 09:11:43.541658 4834 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Feb 23 09:11:43 crc kubenswrapper[4834]: I0223 09:11:43.642007 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Feb 23 09:11:43 crc kubenswrapper[4834]: I0223 09:11:43.966690 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Feb 23 09:11:44 crc kubenswrapper[4834]: I0223 09:11:44.227061 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Feb 23 09:11:44 crc kubenswrapper[4834]: I0223 09:11:44.302007 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Feb 23 09:11:44 crc kubenswrapper[4834]: I0223 09:11:44.600735 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Feb 23 09:11:44 crc kubenswrapper[4834]: I0223 09:11:44.676711 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Feb 23 09:11:44 crc 
kubenswrapper[4834]: I0223 09:11:44.774337 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Feb 23 09:11:44 crc kubenswrapper[4834]: I0223 09:11:44.785920 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Feb 23 09:11:44 crc kubenswrapper[4834]: I0223 09:11:44.856069 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Feb 23 09:11:44 crc kubenswrapper[4834]: I0223 09:11:44.983093 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Feb 23 09:11:45 crc kubenswrapper[4834]: I0223 09:11:45.065651 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Feb 23 09:11:45 crc kubenswrapper[4834]: I0223 09:11:45.099123 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Feb 23 09:11:45 crc kubenswrapper[4834]: I0223 09:11:45.157340 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Feb 23 09:11:45 crc kubenswrapper[4834]: I0223 09:11:45.196064 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Feb 23 09:11:45 crc kubenswrapper[4834]: I0223 09:11:45.219635 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 23 09:11:45 crc kubenswrapper[4834]: I0223 09:11:45.366843 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Feb 23 09:11:45 crc kubenswrapper[4834]: I0223 09:11:45.551524 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Feb 23 09:11:45 crc kubenswrapper[4834]: I0223 09:11:45.589703 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Feb 23 09:11:45 crc kubenswrapper[4834]: I0223 09:11:45.593561 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Feb 23 09:11:45 crc kubenswrapper[4834]: I0223 09:11:45.712658 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Feb 23 09:11:45 crc kubenswrapper[4834]: I0223 09:11:45.800061 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Feb 23 09:11:45 crc kubenswrapper[4834]: I0223 09:11:45.826506 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Feb 23 09:11:45 crc kubenswrapper[4834]: I0223 09:11:45.909736 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Feb 23 09:11:46 crc kubenswrapper[4834]: I0223 09:11:46.089623 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Feb 23 09:11:46 crc kubenswrapper[4834]: I0223 09:11:46.118636 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Feb 23 09:11:46 crc 
kubenswrapper[4834]: I0223 09:11:46.135452 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Feb 23 09:11:46 crc kubenswrapper[4834]: I0223 09:11:46.135450 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Feb 23 09:11:46 crc kubenswrapper[4834]: I0223 09:11:46.144188 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Feb 23 09:11:46 crc kubenswrapper[4834]: I0223 09:11:46.239361 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Feb 23 09:11:46 crc kubenswrapper[4834]: I0223 09:11:46.271974 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Feb 23 09:11:46 crc kubenswrapper[4834]: I0223 09:11:46.368853 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Feb 23 09:11:46 crc kubenswrapper[4834]: I0223 09:11:46.465115 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Feb 23 09:11:46 crc kubenswrapper[4834]: I0223 09:11:46.550685 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Feb 23 09:11:46 crc kubenswrapper[4834]: I0223 09:11:46.681784 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Feb 23 09:11:46 crc kubenswrapper[4834]: I0223 09:11:46.774373 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Feb 23 09:11:46 crc kubenswrapper[4834]: I0223 09:11:46.796751 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Feb 23 09:11:46 crc kubenswrapper[4834]: I0223 09:11:46.888539 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Feb 23 09:11:46 crc kubenswrapper[4834]: I0223 09:11:46.927300 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.007808 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.017798 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.068388 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.133119 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.193918 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.250613 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Feb 
23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.262094 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.357474 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.420553 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.502253 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.553252 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.621576 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.737546 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.791550 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.802264 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.834849 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.836718 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Feb 23 09:11:47 crc kubenswrapper[4834]: I0223 09:11:47.975439 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.011950 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.019529 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.058558 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.139313 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.210165 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.284591 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.337176 4834 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-etcd-operator"/"etcd-ca-bundle" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.349729 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.355782 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.418426 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.442451 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.523976 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.537706 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.562472 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.578986 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.579689 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.619903 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.756177 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.796739 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.920987 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.924028 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Feb 23 09:11:48 crc kubenswrapper[4834]: I0223 09:11:48.932337 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.015552 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.049709 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.110536 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.130701 4834 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.133795 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.190695 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.376883 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.396141 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.457883 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.469251 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.494697 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.515686 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.805745 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.819092 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.847361 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Feb 23 09:11:49 crc kubenswrapper[4834]: I0223 09:11:49.981128 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.097324 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.137006 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.151844 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.159319 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.288622 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.302478 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 
09:11:50.336652 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.595935 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.636871 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.638603 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.691797 4834 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.772628 4834 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.889241 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.937364 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.939035 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Feb 23 09:11:50 crc kubenswrapper[4834]: I0223 09:11:50.993859 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.005502 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.019581 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.020962 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.091558 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.099849 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.140294 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.207916 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.253273 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.367314 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.447758 4834 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.497958 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.548982 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.618714 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.644062 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.712373 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.714372 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.785299 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.805577 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Feb 23 09:11:51 crc kubenswrapper[4834]: I0223 09:11:51.981019 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.005173 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.047464 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.053683 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.121521 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.131360 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.160658 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.173891 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.518562 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.562244 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.566711 4834 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.578381 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.590376 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.898212 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.898300 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Feb 23 09:11:52 crc kubenswrapper[4834]: I0223 09:11:52.933722 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.102963 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.149429 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.156320 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.226561 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.320167 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.355570 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.408426 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.464357 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.634603 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.688102 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.711029 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.759161 4834 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.760713 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" 
podStartSLOduration=42.76069467 podStartE2EDuration="42.76069467s" podCreationTimestamp="2026-02-23 09:11:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:11:31.966740469 +0000 UTC m=+228.045054876" watchObservedRunningTime="2026-02-23 09:11:53.76069467 +0000 UTC m=+249.839009057" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.763525 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.763615 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.767817 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.786943 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=21.786891228 podStartE2EDuration="21.786891228s" podCreationTimestamp="2026-02-23 09:11:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:11:53.784367408 +0000 UTC m=+249.862681835" watchObservedRunningTime="2026-02-23 09:11:53.786891228 +0000 UTC m=+249.865205625" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.823364 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.833090 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.847889 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Feb 23 09:11:53 crc kubenswrapper[4834]: I0223 09:11:53.854126 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.029985 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.061458 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.061710 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.089153 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.123387 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.203253 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.214084 4834 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.257763 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.272116 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.298503 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.347638 4834 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.424227 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.488265 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.525851 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.581613 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.656386 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.678605 4834 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.679013 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://3aeb849089017f7cc033417e2973ea0906eb4949d1c4ccaadabc8922acee6f4f" gracePeriod=5 Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.692038 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.751632 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.894926 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.898386 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.911613 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Feb 23 09:11:54 crc kubenswrapper[4834]: I0223 09:11:54.912169 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Feb 23 09:11:54 crc 
kubenswrapper[4834]: I0223 09:11:54.952595 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.042680 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.086258 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.102283 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.151701 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.152079 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.308233 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.347842 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.599457 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.633252 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.703302 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.755926 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.763054 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.831464 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.837637 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.855295 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.889670 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Feb 23 09:11:55 crc kubenswrapper[4834]: I0223 09:11:55.945733 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.072627 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 
09:11:56.086617 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.143779 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.227877 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.320198 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.364284 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.408889 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.467190 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.478008 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.484995 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.570798 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.721502 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.732699 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.792569 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.886902 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Feb 23 09:11:56 crc kubenswrapper[4834]: I0223 09:11:56.935619 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.084325 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.098832 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.131074 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.265312 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.329960 4834 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-image-registry"/"trusted-ca" Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.351447 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.472253 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.637111 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.649239 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.787051 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.789284 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.811018 4834 patch_prober.go:28] interesting pod/machine-config-daemon-kt9lp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.811098 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.932959 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Feb 23 09:11:57 crc kubenswrapper[4834]: I0223 09:11:57.979575 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Feb 23 09:11:58 crc kubenswrapper[4834]: I0223 09:11:58.037083 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Feb 23 09:11:58 crc kubenswrapper[4834]: I0223 09:11:58.390723 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Feb 23 09:11:58 crc kubenswrapper[4834]: I0223 09:11:58.611539 4834 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Feb 23 09:11:58 crc kubenswrapper[4834]: I0223 09:11:58.634959 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Feb 23 09:11:58 crc kubenswrapper[4834]: I0223 09:11:58.685725 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Feb 23 09:11:58 crc kubenswrapper[4834]: I0223 09:11:58.721139 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Feb 23 09:11:58 crc kubenswrapper[4834]: I0223 09:11:58.767055 4834 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Feb 23 09:11:58 crc kubenswrapper[4834]: I0223 09:11:58.774041 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Feb 23 09:11:58 crc kubenswrapper[4834]: I0223 09:11:58.947225 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.100313 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.226358 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.345841 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.470978 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.647351 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.718282 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.829846 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.877532 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5vvxt"] Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.877788 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5vvxt" podUID="19479380-b603-400a-99e9-6b8186f42f33" containerName="registry-server" containerID="cri-o://97cce5f8185524e49f6123b364bfaad63410b3b78bda2d948a50e9fbcf2cfa0a" gracePeriod=30 Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.888617 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jw46p"] Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.888901 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jw46p" podUID="67be3aab-67ec-42d2-9158-efe9b6ee13e7" containerName="registry-server" containerID="cri-o://8d934ecdd3c06588a8ae1dec0591791bf95a45f065d23ffbf851d5ca5ad727e6" gracePeriod=30 Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.899918 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hlc9q"] Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.900142 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" podUID="2c9de897-17f3-4444-ad95-b5e07b40f6c8" containerName="marketplace-operator" containerID="cri-o://7eb81f4583ac1003fb2dad925150d48185d53433020a43b32683e5bb137ddd49" gracePeriod=30 Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.909783 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/redhat-marketplace-7vzgv"] Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.910040 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7vzgv" podUID="d37845a1-60c4-4708-b671-42d20f6a9b34" containerName="registry-server" containerID="cri-o://5f711532503b6688378876a8e28585b1006463ec4f2ff2e5273714fd09e6577b" gracePeriod=30 Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.915077 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4qbwp"] Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.917598 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4qbwp" podUID="f20021a4-12a6-49ae-a85f-cc8bdb6051d3" containerName="registry-server" containerID="cri-o://50796e5f4c364f4e55373310d7951f4dce95a9e88380343969274d1ebcd34f97" gracePeriod=30 Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.937048 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5954q"] Feb 23 09:11:59 crc kubenswrapper[4834]: E0223 09:11:59.937506 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.937604 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 23 09:11:59 crc kubenswrapper[4834]: E0223 09:11:59.937713 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" containerName="installer" Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.937796 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" containerName="installer" Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.937993 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.938097 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="71f4a070-36cd-4f2b-8b37-1f93aeaee935" containerName="installer" Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.938731 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5954q" Feb 23 09:11:59 crc kubenswrapper[4834]: I0223 09:11:59.951209 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5954q"] Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.100428 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5954q\" (UID: \"5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8\") " pod="openshift-marketplace/marketplace-operator-79b997595-5954q" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.100471 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5954q\" (UID: \"5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8\") " pod="openshift-marketplace/marketplace-operator-79b997595-5954q" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.100492 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5skmj\" (UniqueName: \"kubernetes.io/projected/5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8-kube-api-access-5skmj\") pod \"marketplace-operator-79b997595-5954q\" (UID: \"5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8\") " pod="openshift-marketplace/marketplace-operator-79b997595-5954q" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.201740 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5954q\" (UID: \"5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8\") " pod="openshift-marketplace/marketplace-operator-79b997595-5954q" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.201787 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5954q\" (UID: \"5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8\") " pod="openshift-marketplace/marketplace-operator-79b997595-5954q" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.201806 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5skmj\" (UniqueName: \"kubernetes.io/projected/5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8-kube-api-access-5skmj\") pod \"marketplace-operator-79b997595-5954q\" (UID: \"5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8\") " pod="openshift-marketplace/marketplace-operator-79b997595-5954q" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.203577 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-5954q\" (UID: \"5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8\") " pod="openshift-marketplace/marketplace-operator-79b997595-5954q" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.207841 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: 
\"kubernetes.io/secret/5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-5954q\" (UID: \"5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8\") " pod="openshift-marketplace/marketplace-operator-79b997595-5954q" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.218878 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5skmj\" (UniqueName: \"kubernetes.io/projected/5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8-kube-api-access-5skmj\") pod \"marketplace-operator-79b997595-5954q\" (UID: \"5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8\") " pod="openshift-marketplace/marketplace-operator-79b997595-5954q" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.384895 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-5954q" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.389060 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.389376 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.393756 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.410605 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7l2f5\" (UniqueName: \"kubernetes.io/projected/19479380-b603-400a-99e9-6b8186f42f33-kube-api-access-7l2f5\") pod \"19479380-b603-400a-99e9-6b8186f42f33\" (UID: \"19479380-b603-400a-99e9-6b8186f42f33\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.410663 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.410805 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19479380-b603-400a-99e9-6b8186f42f33-catalog-content\") pod \"19479380-b603-400a-99e9-6b8186f42f33\" (UID: \"19479380-b603-400a-99e9-6b8186f42f33\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.410831 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19479380-b603-400a-99e9-6b8186f42f33-utilities\") pod \"19479380-b603-400a-99e9-6b8186f42f33\" (UID: \"19479380-b603-400a-99e9-6b8186f42f33\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.410850 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.410922 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod 
\"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.410954 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.410982 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.412518 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.412588 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.413232 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.413297 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.414468 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19479380-b603-400a-99e9-6b8186f42f33-utilities" (OuterVolumeSpecName: "utilities") pod "19479380-b603-400a-99e9-6b8186f42f33" (UID: "19479380-b603-400a-99e9-6b8186f42f33"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.416237 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19479380-b603-400a-99e9-6b8186f42f33-kube-api-access-7l2f5" (OuterVolumeSpecName: "kube-api-access-7l2f5") pod "19479380-b603-400a-99e9-6b8186f42f33" (UID: "19479380-b603-400a-99e9-6b8186f42f33"). InnerVolumeSpecName "kube-api-access-7l2f5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.431281 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.443299 4834 generic.go:334] "Generic (PLEG): container finished" podID="2c9de897-17f3-4444-ad95-b5e07b40f6c8" containerID="7eb81f4583ac1003fb2dad925150d48185d53433020a43b32683e5bb137ddd49" exitCode=0 Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.443366 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" event={"ID":"2c9de897-17f3-4444-ad95-b5e07b40f6c8","Type":"ContainerDied","Data":"7eb81f4583ac1003fb2dad925150d48185d53433020a43b32683e5bb137ddd49"} Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.447962 4834 generic.go:334] "Generic (PLEG): container finished" podID="f20021a4-12a6-49ae-a85f-cc8bdb6051d3" containerID="50796e5f4c364f4e55373310d7951f4dce95a9e88380343969274d1ebcd34f97" exitCode=0 Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.448049 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4qbwp" event={"ID":"f20021a4-12a6-49ae-a85f-cc8bdb6051d3","Type":"ContainerDied","Data":"50796e5f4c364f4e55373310d7951f4dce95a9e88380343969274d1ebcd34f97"} Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.453382 4834 generic.go:334] "Generic (PLEG): container finished" podID="67be3aab-67ec-42d2-9158-efe9b6ee13e7" containerID="8d934ecdd3c06588a8ae1dec0591791bf95a45f065d23ffbf851d5ca5ad727e6" exitCode=0 Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.453447 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jw46p" event={"ID":"67be3aab-67ec-42d2-9158-efe9b6ee13e7","Type":"ContainerDied","Data":"8d934ecdd3c06588a8ae1dec0591791bf95a45f065d23ffbf851d5ca5ad727e6"} Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.457374 4834 generic.go:334] "Generic (PLEG): container finished" podID="19479380-b603-400a-99e9-6b8186f42f33" containerID="97cce5f8185524e49f6123b364bfaad63410b3b78bda2d948a50e9fbcf2cfa0a" exitCode=0 Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.457456 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5vvxt" event={"ID":"19479380-b603-400a-99e9-6b8186f42f33","Type":"ContainerDied","Data":"97cce5f8185524e49f6123b364bfaad63410b3b78bda2d948a50e9fbcf2cfa0a"} Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.457479 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5vvxt" event={"ID":"19479380-b603-400a-99e9-6b8186f42f33","Type":"ContainerDied","Data":"8b036bc0faf2489dd8e6c11c8acdf8c83c992b630d221efb50492e1d3c453ba9"} Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.457498 4834 scope.go:117] "RemoveContainer" containerID="97cce5f8185524e49f6123b364bfaad63410b3b78bda2d948a50e9fbcf2cfa0a" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.457507 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5vvxt" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.461383 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.461460 4834 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="3aeb849089017f7cc033417e2973ea0906eb4949d1c4ccaadabc8922acee6f4f" exitCode=137 Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.461652 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.464830 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/19479380-b603-400a-99e9-6b8186f42f33-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "19479380-b603-400a-99e9-6b8186f42f33" (UID: "19479380-b603-400a-99e9-6b8186f42f33"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.466774 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.470124 4834 generic.go:334] "Generic (PLEG): container finished" podID="d37845a1-60c4-4708-b671-42d20f6a9b34" containerID="5f711532503b6688378876a8e28585b1006463ec4f2ff2e5273714fd09e6577b" exitCode=0 Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.470189 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7vzgv" event={"ID":"d37845a1-60c4-4708-b671-42d20f6a9b34","Type":"ContainerDied","Data":"5f711532503b6688378876a8e28585b1006463ec4f2ff2e5273714fd09e6577b"} Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.470999 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.505807 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.511243 4834 scope.go:117] "RemoveContainer" containerID="2ad9d1b521e9d0630b7d26c4f5fc5d6ecd9dd7e1bee7b4659a9998fd998a85dc" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.511738 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-89srs\" (UniqueName: \"kubernetes.io/projected/67be3aab-67ec-42d2-9158-efe9b6ee13e7-kube-api-access-89srs\") pod \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\" (UID: \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.511796 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d37845a1-60c4-4708-b671-42d20f6a9b34-catalog-content\") pod \"d37845a1-60c4-4708-b671-42d20f6a9b34\" (UID: \"d37845a1-60c4-4708-b671-42d20f6a9b34\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.511838 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4thfs\" (UniqueName: \"kubernetes.io/projected/2c9de897-17f3-4444-ad95-b5e07b40f6c8-kube-api-access-4thfs\") pod \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\" (UID: \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.511865 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67be3aab-67ec-42d2-9158-efe9b6ee13e7-catalog-content\") pod \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\" (UID: \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.511886 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r2gth\" (UniqueName: \"kubernetes.io/projected/d37845a1-60c4-4708-b671-42d20f6a9b34-kube-api-access-r2gth\") pod \"d37845a1-60c4-4708-b671-42d20f6a9b34\" (UID: \"d37845a1-60c4-4708-b671-42d20f6a9b34\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.511922 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67be3aab-67ec-42d2-9158-efe9b6ee13e7-utilities\") pod \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\" (UID: \"67be3aab-67ec-42d2-9158-efe9b6ee13e7\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.511969 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2c9de897-17f3-4444-ad95-b5e07b40f6c8-marketplace-trusted-ca\") pod \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\" (UID: \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.511994 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2c9de897-17f3-4444-ad95-b5e07b40f6c8-marketplace-operator-metrics\") pod \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\" (UID: \"2c9de897-17f3-4444-ad95-b5e07b40f6c8\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.512021 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d37845a1-60c4-4708-b671-42d20f6a9b34-utilities\") pod \"d37845a1-60c4-4708-b671-42d20f6a9b34\" (UID: \"d37845a1-60c4-4708-b671-42d20f6a9b34\") " Feb 23 09:12:00 crc kubenswrapper[4834]: 
I0223 09:12:00.512192 4834 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/19479380-b603-400a-99e9-6b8186f42f33-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.512216 4834 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/19479380-b603-400a-99e9-6b8186f42f33-utilities\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.512229 4834 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.512239 4834 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.512250 4834 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.512261 4834 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.512269 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7l2f5\" (UniqueName: \"kubernetes.io/projected/19479380-b603-400a-99e9-6b8186f42f33-kube-api-access-7l2f5\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.512280 4834 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.513329 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d37845a1-60c4-4708-b671-42d20f6a9b34-utilities" (OuterVolumeSpecName: "utilities") pod "d37845a1-60c4-4708-b671-42d20f6a9b34" (UID: "d37845a1-60c4-4708-b671-42d20f6a9b34"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.516309 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2c9de897-17f3-4444-ad95-b5e07b40f6c8-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "2c9de897-17f3-4444-ad95-b5e07b40f6c8" (UID: "2c9de897-17f3-4444-ad95-b5e07b40f6c8"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.517027 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67be3aab-67ec-42d2-9158-efe9b6ee13e7-kube-api-access-89srs" (OuterVolumeSpecName: "kube-api-access-89srs") pod "67be3aab-67ec-42d2-9158-efe9b6ee13e7" (UID: "67be3aab-67ec-42d2-9158-efe9b6ee13e7"). InnerVolumeSpecName "kube-api-access-89srs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.520266 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d37845a1-60c4-4708-b671-42d20f6a9b34-kube-api-access-r2gth" (OuterVolumeSpecName: "kube-api-access-r2gth") pod "d37845a1-60c4-4708-b671-42d20f6a9b34" (UID: "d37845a1-60c4-4708-b671-42d20f6a9b34"). InnerVolumeSpecName "kube-api-access-r2gth". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.521793 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2c9de897-17f3-4444-ad95-b5e07b40f6c8-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "2c9de897-17f3-4444-ad95-b5e07b40f6c8" (UID: "2c9de897-17f3-4444-ad95-b5e07b40f6c8"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.523899 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2c9de897-17f3-4444-ad95-b5e07b40f6c8-kube-api-access-4thfs" (OuterVolumeSpecName: "kube-api-access-4thfs") pod "2c9de897-17f3-4444-ad95-b5e07b40f6c8" (UID: "2c9de897-17f3-4444-ad95-b5e07b40f6c8"). InnerVolumeSpecName "kube-api-access-4thfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.532476 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67be3aab-67ec-42d2-9158-efe9b6ee13e7-utilities" (OuterVolumeSpecName: "utilities") pod "67be3aab-67ec-42d2-9158-efe9b6ee13e7" (UID: "67be3aab-67ec-42d2-9158-efe9b6ee13e7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.543244 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.546777 4834 scope.go:117] "RemoveContainer" containerID="de41d4bc6c3994a8cc9bee90e631ff724658bd9ee4dd20239a95081e3acc547e" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.554681 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d37845a1-60c4-4708-b671-42d20f6a9b34-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d37845a1-60c4-4708-b671-42d20f6a9b34" (UID: "d37845a1-60c4-4708-b671-42d20f6a9b34"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.564146 4834 scope.go:117] "RemoveContainer" containerID="97cce5f8185524e49f6123b364bfaad63410b3b78bda2d948a50e9fbcf2cfa0a" Feb 23 09:12:00 crc kubenswrapper[4834]: E0223 09:12:00.564898 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"97cce5f8185524e49f6123b364bfaad63410b3b78bda2d948a50e9fbcf2cfa0a\": container with ID starting with 97cce5f8185524e49f6123b364bfaad63410b3b78bda2d948a50e9fbcf2cfa0a not found: ID does not exist" containerID="97cce5f8185524e49f6123b364bfaad63410b3b78bda2d948a50e9fbcf2cfa0a" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.564924 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"97cce5f8185524e49f6123b364bfaad63410b3b78bda2d948a50e9fbcf2cfa0a"} err="failed to get container status \"97cce5f8185524e49f6123b364bfaad63410b3b78bda2d948a50e9fbcf2cfa0a\": rpc error: code = NotFound desc = could not find container \"97cce5f8185524e49f6123b364bfaad63410b3b78bda2d948a50e9fbcf2cfa0a\": container with ID starting with 97cce5f8185524e49f6123b364bfaad63410b3b78bda2d948a50e9fbcf2cfa0a not found: ID does not exist" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.564942 4834 scope.go:117] "RemoveContainer" containerID="2ad9d1b521e9d0630b7d26c4f5fc5d6ecd9dd7e1bee7b4659a9998fd998a85dc" Feb 23 09:12:00 crc kubenswrapper[4834]: E0223 09:12:00.565213 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ad9d1b521e9d0630b7d26c4f5fc5d6ecd9dd7e1bee7b4659a9998fd998a85dc\": container with ID starting with 2ad9d1b521e9d0630b7d26c4f5fc5d6ecd9dd7e1bee7b4659a9998fd998a85dc not found: ID does not exist" containerID="2ad9d1b521e9d0630b7d26c4f5fc5d6ecd9dd7e1bee7b4659a9998fd998a85dc" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.565426 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ad9d1b521e9d0630b7d26c4f5fc5d6ecd9dd7e1bee7b4659a9998fd998a85dc"} err="failed to get container status \"2ad9d1b521e9d0630b7d26c4f5fc5d6ecd9dd7e1bee7b4659a9998fd998a85dc\": rpc error: code = NotFound desc = could not find container \"2ad9d1b521e9d0630b7d26c4f5fc5d6ecd9dd7e1bee7b4659a9998fd998a85dc\": container with ID starting with 2ad9d1b521e9d0630b7d26c4f5fc5d6ecd9dd7e1bee7b4659a9998fd998a85dc not found: ID does not exist" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.565454 4834 scope.go:117] "RemoveContainer" containerID="de41d4bc6c3994a8cc9bee90e631ff724658bd9ee4dd20239a95081e3acc547e" Feb 23 09:12:00 crc kubenswrapper[4834]: E0223 09:12:00.565858 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de41d4bc6c3994a8cc9bee90e631ff724658bd9ee4dd20239a95081e3acc547e\": container with ID starting with de41d4bc6c3994a8cc9bee90e631ff724658bd9ee4dd20239a95081e3acc547e not found: ID does not exist" containerID="de41d4bc6c3994a8cc9bee90e631ff724658bd9ee4dd20239a95081e3acc547e" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.565882 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de41d4bc6c3994a8cc9bee90e631ff724658bd9ee4dd20239a95081e3acc547e"} err="failed to get container status \"de41d4bc6c3994a8cc9bee90e631ff724658bd9ee4dd20239a95081e3acc547e\": rpc error: code = NotFound desc = could not 
find container \"de41d4bc6c3994a8cc9bee90e631ff724658bd9ee4dd20239a95081e3acc547e\": container with ID starting with de41d4bc6c3994a8cc9bee90e631ff724658bd9ee4dd20239a95081e3acc547e not found: ID does not exist" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.565899 4834 scope.go:117] "RemoveContainer" containerID="3aeb849089017f7cc033417e2973ea0906eb4949d1c4ccaadabc8922acee6f4f" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.582847 4834 scope.go:117] "RemoveContainer" containerID="3aeb849089017f7cc033417e2973ea0906eb4949d1c4ccaadabc8922acee6f4f" Feb 23 09:12:00 crc kubenswrapper[4834]: E0223 09:12:00.583447 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3aeb849089017f7cc033417e2973ea0906eb4949d1c4ccaadabc8922acee6f4f\": container with ID starting with 3aeb849089017f7cc033417e2973ea0906eb4949d1c4ccaadabc8922acee6f4f not found: ID does not exist" containerID="3aeb849089017f7cc033417e2973ea0906eb4949d1c4ccaadabc8922acee6f4f" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.583482 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3aeb849089017f7cc033417e2973ea0906eb4949d1c4ccaadabc8922acee6f4f"} err="failed to get container status \"3aeb849089017f7cc033417e2973ea0906eb4949d1c4ccaadabc8922acee6f4f\": rpc error: code = NotFound desc = could not find container \"3aeb849089017f7cc033417e2973ea0906eb4949d1c4ccaadabc8922acee6f4f\": container with ID starting with 3aeb849089017f7cc033417e2973ea0906eb4949d1c4ccaadabc8922acee6f4f not found: ID does not exist" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.591237 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.591477 4834 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.595102 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67be3aab-67ec-42d2-9158-efe9b6ee13e7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "67be3aab-67ec-42d2-9158-efe9b6ee13e7" (UID: "67be3aab-67ec-42d2-9158-efe9b6ee13e7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.601653 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.601694 4834 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="c45e9306-0562-4260-aa39-262eba5fac90" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.605093 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.605131 4834 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="c45e9306-0562-4260-aa39-262eba5fac90" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.612940 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-524z2\" (UniqueName: \"kubernetes.io/projected/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-kube-api-access-524z2\") pod \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\" (UID: \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.613027 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-catalog-content\") pod \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\" (UID: \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.613059 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-utilities\") pod \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\" (UID: \"f20021a4-12a6-49ae-a85f-cc8bdb6051d3\") " Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.613194 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-89srs\" (UniqueName: \"kubernetes.io/projected/67be3aab-67ec-42d2-9158-efe9b6ee13e7-kube-api-access-89srs\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.613204 4834 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d37845a1-60c4-4708-b671-42d20f6a9b34-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.613213 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4thfs\" (UniqueName: \"kubernetes.io/projected/2c9de897-17f3-4444-ad95-b5e07b40f6c8-kube-api-access-4thfs\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.613221 4834 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67be3aab-67ec-42d2-9158-efe9b6ee13e7-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.613230 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r2gth\" (UniqueName: \"kubernetes.io/projected/d37845a1-60c4-4708-b671-42d20f6a9b34-kube-api-access-r2gth\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.613239 4834 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/67be3aab-67ec-42d2-9158-efe9b6ee13e7-utilities\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.613247 4834 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2c9de897-17f3-4444-ad95-b5e07b40f6c8-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.613255 4834 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/2c9de897-17f3-4444-ad95-b5e07b40f6c8-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.613264 4834 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d37845a1-60c4-4708-b671-42d20f6a9b34-utilities\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.614424 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-utilities" (OuterVolumeSpecName: "utilities") pod "f20021a4-12a6-49ae-a85f-cc8bdb6051d3" (UID: "f20021a4-12a6-49ae-a85f-cc8bdb6051d3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.615933 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-kube-api-access-524z2" (OuterVolumeSpecName: "kube-api-access-524z2") pod "f20021a4-12a6-49ae-a85f-cc8bdb6051d3" (UID: "f20021a4-12a6-49ae-a85f-cc8bdb6051d3"). InnerVolumeSpecName "kube-api-access-524z2". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.714115 4834 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-utilities\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.714171 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-524z2\" (UniqueName: \"kubernetes.io/projected/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-kube-api-access-524z2\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.740457 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f20021a4-12a6-49ae-a85f-cc8bdb6051d3" (UID: "f20021a4-12a6-49ae-a85f-cc8bdb6051d3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.783365 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5vvxt"] Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.789559 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5vvxt"] Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.815978 4834 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f20021a4-12a6-49ae-a85f-cc8bdb6051d3-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 23 09:12:00 crc kubenswrapper[4834]: I0223 09:12:00.825140 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-5954q"] Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.478761 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jw46p" event={"ID":"67be3aab-67ec-42d2-9158-efe9b6ee13e7","Type":"ContainerDied","Data":"1e548dee2426db2f2a28dd4bcfe67049a304951a507201c8a78c44dbc83b8fcc"} Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.478845 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jw46p" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.479140 4834 scope.go:117] "RemoveContainer" containerID="8d934ecdd3c06588a8ae1dec0591791bf95a45f065d23ffbf851d5ca5ad727e6" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.487124 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7vzgv" event={"ID":"d37845a1-60c4-4708-b671-42d20f6a9b34","Type":"ContainerDied","Data":"fdd610d1623bf6c8298088923f3df8090407da9490202eb45872da2bf8274071"} Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.487160 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7vzgv" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.489577 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" event={"ID":"2c9de897-17f3-4444-ad95-b5e07b40f6c8","Type":"ContainerDied","Data":"45f772b5eea9ac90ace0bdddc67f74973cc3eaea9b4048fd2e77f90014365c55"} Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.489693 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-hlc9q" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.492210 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5954q" event={"ID":"5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8","Type":"ContainerStarted","Data":"483b4678677e2ce2032ce8c9f2d96eba7c7da01b0a17186777b9b5d892b806ab"} Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.492517 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-5954q" event={"ID":"5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8","Type":"ContainerStarted","Data":"fcd74d0ae1e93c5238b56571fc9395ff9d7178bab9b798b0a7967b8093defe76"} Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.493153 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-5954q" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.495244 4834 scope.go:117] "RemoveContainer" containerID="46f23e24d9f3c230f7207f1bd6dc737b09dc24e38a02745d2a201235e0ed3055" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.496008 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4qbwp" event={"ID":"f20021a4-12a6-49ae-a85f-cc8bdb6051d3","Type":"ContainerDied","Data":"758e74389d590b701fddef7fcaa84b2ac1dbcf59ec72a7d05997a851bf7e1b84"} Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.496573 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4qbwp" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.507345 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-5954q" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.517716 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jw46p"] Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.522693 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jw46p"] Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.529619 4834 scope.go:117] "RemoveContainer" containerID="2c0edafdc67967d0d5408e6e7a9de9a96d79addef6cee56cad0a9348fa8583a2" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.531861 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-5954q" podStartSLOduration=2.531845025 podStartE2EDuration="2.531845025s" podCreationTimestamp="2026-02-23 09:11:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:12:01.526746531 +0000 UTC m=+257.605060928" watchObservedRunningTime="2026-02-23 09:12:01.531845025 +0000 UTC m=+257.610159412" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.540811 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hlc9q"] Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.548203 4834 scope.go:117] "RemoveContainer" containerID="5f711532503b6688378876a8e28585b1006463ec4f2ff2e5273714fd09e6577b" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.548673 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-hlc9q"] Feb 23 09:12:01 crc kubenswrapper[4834]: 
I0223 09:12:01.552967 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7vzgv"] Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.558391 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7vzgv"] Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.563138 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4qbwp"] Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.566109 4834 scope.go:117] "RemoveContainer" containerID="a023f5182377bcb47e1cd94ddb70c1b2018ff2983c25a95291a28f80d3923df8" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.566762 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4qbwp"] Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.639410 4834 scope.go:117] "RemoveContainer" containerID="efd20e7f992157fee6a513b7e47e3b9ff42b4e522e0196958f78e6171b1bb5ff" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.659846 4834 scope.go:117] "RemoveContainer" containerID="7eb81f4583ac1003fb2dad925150d48185d53433020a43b32683e5bb137ddd49" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.670507 4834 scope.go:117] "RemoveContainer" containerID="50796e5f4c364f4e55373310d7951f4dce95a9e88380343969274d1ebcd34f97" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.680942 4834 scope.go:117] "RemoveContainer" containerID="08b2d4eff732882d0905f6e666eef4e9574c187d7b86c62b5a47c4e21ce6fe14" Feb 23 09:12:01 crc kubenswrapper[4834]: I0223 09:12:01.693478 4834 scope.go:117] "RemoveContainer" containerID="e33267c681bcbf3330e8922779bb01e3a5114c288c6c51bf2df4630abdffc0c9" Feb 23 09:12:02 crc kubenswrapper[4834]: I0223 09:12:02.594041 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19479380-b603-400a-99e9-6b8186f42f33" path="/var/lib/kubelet/pods/19479380-b603-400a-99e9-6b8186f42f33/volumes" Feb 23 09:12:02 crc kubenswrapper[4834]: I0223 09:12:02.595591 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2c9de897-17f3-4444-ad95-b5e07b40f6c8" path="/var/lib/kubelet/pods/2c9de897-17f3-4444-ad95-b5e07b40f6c8/volumes" Feb 23 09:12:02 crc kubenswrapper[4834]: I0223 09:12:02.596355 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67be3aab-67ec-42d2-9158-efe9b6ee13e7" path="/var/lib/kubelet/pods/67be3aab-67ec-42d2-9158-efe9b6ee13e7/volumes" Feb 23 09:12:02 crc kubenswrapper[4834]: I0223 09:12:02.597954 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d37845a1-60c4-4708-b671-42d20f6a9b34" path="/var/lib/kubelet/pods/d37845a1-60c4-4708-b671-42d20f6a9b34/volumes" Feb 23 09:12:02 crc kubenswrapper[4834]: I0223 09:12:02.599065 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f20021a4-12a6-49ae-a85f-cc8bdb6051d3" path="/var/lib/kubelet/pods/f20021a4-12a6-49ae-a85f-cc8bdb6051d3/volumes" Feb 23 09:12:27 crc kubenswrapper[4834]: I0223 09:12:27.809887 4834 patch_prober.go:28] interesting pod/machine-config-daemon-kt9lp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 23 09:12:27 crc kubenswrapper[4834]: I0223 09:12:27.810308 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 23 09:12:27 crc kubenswrapper[4834]: I0223 09:12:27.810354 4834 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:12:27 crc kubenswrapper[4834]: I0223 09:12:27.810864 4834 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5f0104afd41d2190f8d4b34e88bbfed12c35766c1fe0c2a0e109ae44cbac3345"} pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 23 09:12:27 crc kubenswrapper[4834]: I0223 09:12:27.810907 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" containerID="cri-o://5f0104afd41d2190f8d4b34e88bbfed12c35766c1fe0c2a0e109ae44cbac3345" gracePeriod=600 Feb 23 09:12:28 crc kubenswrapper[4834]: I0223 09:12:28.640707 4834 generic.go:334] "Generic (PLEG): container finished" podID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerID="5f0104afd41d2190f8d4b34e88bbfed12c35766c1fe0c2a0e109ae44cbac3345" exitCode=0 Feb 23 09:12:28 crc kubenswrapper[4834]: I0223 09:12:28.640786 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" event={"ID":"1172b9a5-71ca-49e9-a033-3b59c9c024a4","Type":"ContainerDied","Data":"5f0104afd41d2190f8d4b34e88bbfed12c35766c1fe0c2a0e109ae44cbac3345"} Feb 23 09:12:28 crc kubenswrapper[4834]: I0223 09:12:28.641966 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" event={"ID":"1172b9a5-71ca-49e9-a033-3b59c9c024a4","Type":"ContainerStarted","Data":"88a28e626207b9996f70605163951b8b51203e8d24ce2c7e8948a63be9984191"} Feb 23 09:12:44 crc kubenswrapper[4834]: I0223 09:12:44.388553 4834 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.442782 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-wrptv"] Feb 23 09:13:26 crc kubenswrapper[4834]: E0223 09:13:26.443551 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19479380-b603-400a-99e9-6b8186f42f33" containerName="registry-server" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443568 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="19479380-b603-400a-99e9-6b8186f42f33" containerName="registry-server" Feb 23 09:13:26 crc kubenswrapper[4834]: E0223 09:13:26.443584 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f20021a4-12a6-49ae-a85f-cc8bdb6051d3" containerName="extract-utilities" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443592 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f20021a4-12a6-49ae-a85f-cc8bdb6051d3" containerName="extract-utilities" Feb 23 09:13:26 crc kubenswrapper[4834]: E0223 09:13:26.443603 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67be3aab-67ec-42d2-9158-efe9b6ee13e7" containerName="extract-utilities" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443613 
4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="67be3aab-67ec-42d2-9158-efe9b6ee13e7" containerName="extract-utilities" Feb 23 09:13:26 crc kubenswrapper[4834]: E0223 09:13:26.443625 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f20021a4-12a6-49ae-a85f-cc8bdb6051d3" containerName="extract-content" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443632 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f20021a4-12a6-49ae-a85f-cc8bdb6051d3" containerName="extract-content" Feb 23 09:13:26 crc kubenswrapper[4834]: E0223 09:13:26.443643 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19479380-b603-400a-99e9-6b8186f42f33" containerName="extract-content" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443650 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="19479380-b603-400a-99e9-6b8186f42f33" containerName="extract-content" Feb 23 09:13:26 crc kubenswrapper[4834]: E0223 09:13:26.443660 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67be3aab-67ec-42d2-9158-efe9b6ee13e7" containerName="extract-content" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443667 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="67be3aab-67ec-42d2-9158-efe9b6ee13e7" containerName="extract-content" Feb 23 09:13:26 crc kubenswrapper[4834]: E0223 09:13:26.443677 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67be3aab-67ec-42d2-9158-efe9b6ee13e7" containerName="registry-server" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443684 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="67be3aab-67ec-42d2-9158-efe9b6ee13e7" containerName="registry-server" Feb 23 09:13:26 crc kubenswrapper[4834]: E0223 09:13:26.443695 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d37845a1-60c4-4708-b671-42d20f6a9b34" containerName="extract-content" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443702 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="d37845a1-60c4-4708-b671-42d20f6a9b34" containerName="extract-content" Feb 23 09:13:26 crc kubenswrapper[4834]: E0223 09:13:26.443713 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19479380-b603-400a-99e9-6b8186f42f33" containerName="extract-utilities" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443721 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="19479380-b603-400a-99e9-6b8186f42f33" containerName="extract-utilities" Feb 23 09:13:26 crc kubenswrapper[4834]: E0223 09:13:26.443732 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d37845a1-60c4-4708-b671-42d20f6a9b34" containerName="extract-utilities" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443739 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="d37845a1-60c4-4708-b671-42d20f6a9b34" containerName="extract-utilities" Feb 23 09:13:26 crc kubenswrapper[4834]: E0223 09:13:26.443746 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d37845a1-60c4-4708-b671-42d20f6a9b34" containerName="registry-server" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443754 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="d37845a1-60c4-4708-b671-42d20f6a9b34" containerName="registry-server" Feb 23 09:13:26 crc kubenswrapper[4834]: E0223 09:13:26.443768 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2c9de897-17f3-4444-ad95-b5e07b40f6c8" containerName="marketplace-operator" Feb 23 09:13:26 crc 
kubenswrapper[4834]: I0223 09:13:26.443775 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="2c9de897-17f3-4444-ad95-b5e07b40f6c8" containerName="marketplace-operator" Feb 23 09:13:26 crc kubenswrapper[4834]: E0223 09:13:26.443787 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f20021a4-12a6-49ae-a85f-cc8bdb6051d3" containerName="registry-server" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443795 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="f20021a4-12a6-49ae-a85f-cc8bdb6051d3" containerName="registry-server" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443908 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="f20021a4-12a6-49ae-a85f-cc8bdb6051d3" containerName="registry-server" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443923 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="67be3aab-67ec-42d2-9158-efe9b6ee13e7" containerName="registry-server" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443933 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="19479380-b603-400a-99e9-6b8186f42f33" containerName="registry-server" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443943 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="2c9de897-17f3-4444-ad95-b5e07b40f6c8" containerName="marketplace-operator" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.443952 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="d37845a1-60c4-4708-b671-42d20f6a9b34" containerName="registry-server" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.444414 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.455996 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-wrptv"] Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.595686 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.595787 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/edee4ad4-972a-42ad-a665-be54f51a5aac-ca-trust-extracted\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.595836 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/edee4ad4-972a-42ad-a665-be54f51a5aac-registry-certificates\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.595885 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: 
\"kubernetes.io/projected/edee4ad4-972a-42ad-a665-be54f51a5aac-registry-tls\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.595910 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/edee4ad4-972a-42ad-a665-be54f51a5aac-installation-pull-secrets\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.595942 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/edee4ad4-972a-42ad-a665-be54f51a5aac-trusted-ca\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.595975 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-567kd\" (UniqueName: \"kubernetes.io/projected/edee4ad4-972a-42ad-a665-be54f51a5aac-kube-api-access-567kd\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.596006 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/edee4ad4-972a-42ad-a665-be54f51a5aac-bound-sa-token\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.618500 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.697071 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/edee4ad4-972a-42ad-a665-be54f51a5aac-registry-certificates\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.697147 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/edee4ad4-972a-42ad-a665-be54f51a5aac-registry-tls\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.697177 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/edee4ad4-972a-42ad-a665-be54f51a5aac-installation-pull-secrets\") pod 
\"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.697222 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/edee4ad4-972a-42ad-a665-be54f51a5aac-trusted-ca\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.697249 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-567kd\" (UniqueName: \"kubernetes.io/projected/edee4ad4-972a-42ad-a665-be54f51a5aac-kube-api-access-567kd\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.697287 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/edee4ad4-972a-42ad-a665-be54f51a5aac-bound-sa-token\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.697341 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/edee4ad4-972a-42ad-a665-be54f51a5aac-ca-trust-extracted\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.697862 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/edee4ad4-972a-42ad-a665-be54f51a5aac-ca-trust-extracted\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.698842 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/edee4ad4-972a-42ad-a665-be54f51a5aac-trusted-ca\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.699823 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/edee4ad4-972a-42ad-a665-be54f51a5aac-registry-certificates\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.708203 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/edee4ad4-972a-42ad-a665-be54f51a5aac-registry-tls\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.708393 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/edee4ad4-972a-42ad-a665-be54f51a5aac-installation-pull-secrets\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.717248 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-567kd\" (UniqueName: \"kubernetes.io/projected/edee4ad4-972a-42ad-a665-be54f51a5aac-kube-api-access-567kd\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.719946 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/edee4ad4-972a-42ad-a665-be54f51a5aac-bound-sa-token\") pod \"image-registry-66df7c8f76-wrptv\" (UID: \"edee4ad4-972a-42ad-a665-be54f51a5aac\") " pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.760080 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:26 crc kubenswrapper[4834]: I0223 09:13:26.940188 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-wrptv"] Feb 23 09:13:27 crc kubenswrapper[4834]: I0223 09:13:27.956382 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" event={"ID":"edee4ad4-972a-42ad-a665-be54f51a5aac","Type":"ContainerStarted","Data":"f8137b3bf3263ef69c10282ca63423fbeff904ed3bae7a14ad429d9c1a448626"} Feb 23 09:13:27 crc kubenswrapper[4834]: I0223 09:13:27.958018 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" event={"ID":"edee4ad4-972a-42ad-a665-be54f51a5aac","Type":"ContainerStarted","Data":"fef83a80b19204c85dce7a5cd1087d280540f93f1256ccdac9648118fbf36f51"} Feb 23 09:13:27 crc kubenswrapper[4834]: I0223 09:13:27.958152 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:27 crc kubenswrapper[4834]: I0223 09:13:27.977580 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" podStartSLOduration=1.977565604 podStartE2EDuration="1.977565604s" podCreationTimestamp="2026-02-23 09:13:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:13:27.973027318 +0000 UTC m=+344.051341705" watchObservedRunningTime="2026-02-23 09:13:27.977565604 +0000 UTC m=+344.055879991" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.533005 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-b2sb8"] Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.534906 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.537682 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.543661 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b2sb8"] Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.660426 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f51e05f-82f7-4011-8801-0d387547ba12-catalog-content\") pod \"redhat-marketplace-b2sb8\" (UID: \"6f51e05f-82f7-4011-8801-0d387547ba12\") " pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.660704 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvbgh\" (UniqueName: \"kubernetes.io/projected/6f51e05f-82f7-4011-8801-0d387547ba12-kube-api-access-cvbgh\") pod \"redhat-marketplace-b2sb8\" (UID: \"6f51e05f-82f7-4011-8801-0d387547ba12\") " pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.660791 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f51e05f-82f7-4011-8801-0d387547ba12-utilities\") pod \"redhat-marketplace-b2sb8\" (UID: \"6f51e05f-82f7-4011-8801-0d387547ba12\") " pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.727914 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-lrv4x"] Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.729099 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.730964 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.743702 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lrv4x"] Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.762528 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvbgh\" (UniqueName: \"kubernetes.io/projected/6f51e05f-82f7-4011-8801-0d387547ba12-kube-api-access-cvbgh\") pod \"redhat-marketplace-b2sb8\" (UID: \"6f51e05f-82f7-4011-8801-0d387547ba12\") " pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.762588 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f51e05f-82f7-4011-8801-0d387547ba12-utilities\") pod \"redhat-marketplace-b2sb8\" (UID: \"6f51e05f-82f7-4011-8801-0d387547ba12\") " pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.762641 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f51e05f-82f7-4011-8801-0d387547ba12-catalog-content\") pod \"redhat-marketplace-b2sb8\" (UID: \"6f51e05f-82f7-4011-8801-0d387547ba12\") " pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.763302 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6f51e05f-82f7-4011-8801-0d387547ba12-catalog-content\") pod \"redhat-marketplace-b2sb8\" (UID: \"6f51e05f-82f7-4011-8801-0d387547ba12\") " pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.763379 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6f51e05f-82f7-4011-8801-0d387547ba12-utilities\") pod \"redhat-marketplace-b2sb8\" (UID: \"6f51e05f-82f7-4011-8801-0d387547ba12\") " pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.783518 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvbgh\" (UniqueName: \"kubernetes.io/projected/6f51e05f-82f7-4011-8801-0d387547ba12-kube-api-access-cvbgh\") pod \"redhat-marketplace-b2sb8\" (UID: \"6f51e05f-82f7-4011-8801-0d387547ba12\") " pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.854881 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.863572 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz5g7\" (UniqueName: \"kubernetes.io/projected/6e4ac398-147a-4179-b898-bf5e8df2e333-kube-api-access-qz5g7\") pod \"certified-operators-lrv4x\" (UID: \"6e4ac398-147a-4179-b898-bf5e8df2e333\") " pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.863650 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e4ac398-147a-4179-b898-bf5e8df2e333-utilities\") pod \"certified-operators-lrv4x\" (UID: \"6e4ac398-147a-4179-b898-bf5e8df2e333\") " pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.863695 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e4ac398-147a-4179-b898-bf5e8df2e333-catalog-content\") pod \"certified-operators-lrv4x\" (UID: \"6e4ac398-147a-4179-b898-bf5e8df2e333\") " pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.964844 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz5g7\" (UniqueName: \"kubernetes.io/projected/6e4ac398-147a-4179-b898-bf5e8df2e333-kube-api-access-qz5g7\") pod \"certified-operators-lrv4x\" (UID: \"6e4ac398-147a-4179-b898-bf5e8df2e333\") " pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.964948 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e4ac398-147a-4179-b898-bf5e8df2e333-utilities\") pod \"certified-operators-lrv4x\" (UID: \"6e4ac398-147a-4179-b898-bf5e8df2e333\") " pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.965000 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e4ac398-147a-4179-b898-bf5e8df2e333-catalog-content\") pod \"certified-operators-lrv4x\" (UID: \"6e4ac398-147a-4179-b898-bf5e8df2e333\") " pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.965772 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6e4ac398-147a-4179-b898-bf5e8df2e333-utilities\") pod \"certified-operators-lrv4x\" (UID: \"6e4ac398-147a-4179-b898-bf5e8df2e333\") " pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.965862 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6e4ac398-147a-4179-b898-bf5e8df2e333-catalog-content\") pod \"certified-operators-lrv4x\" (UID: \"6e4ac398-147a-4179-b898-bf5e8df2e333\") " pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:31 crc kubenswrapper[4834]: I0223 09:13:31.998473 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz5g7\" (UniqueName: \"kubernetes.io/projected/6e4ac398-147a-4179-b898-bf5e8df2e333-kube-api-access-qz5g7\") pod 
\"certified-operators-lrv4x\" (UID: \"6e4ac398-147a-4179-b898-bf5e8df2e333\") " pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:32 crc kubenswrapper[4834]: I0223 09:13:32.043302 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:32 crc kubenswrapper[4834]: I0223 09:13:32.269012 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b2sb8"] Feb 23 09:13:32 crc kubenswrapper[4834]: I0223 09:13:32.441372 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-lrv4x"] Feb 23 09:13:32 crc kubenswrapper[4834]: W0223 09:13:32.472837 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6e4ac398_147a_4179_b898_bf5e8df2e333.slice/crio-e640fdaa797ba31ab1b44c3f2e54990388c0d505d78060d13c6985407817c3d9 WatchSource:0}: Error finding container e640fdaa797ba31ab1b44c3f2e54990388c0d505d78060d13c6985407817c3d9: Status 404 returned error can't find the container with id e640fdaa797ba31ab1b44c3f2e54990388c0d505d78060d13c6985407817c3d9 Feb 23 09:13:33 crc kubenswrapper[4834]: I0223 09:13:33.021358 4834 generic.go:334] "Generic (PLEG): container finished" podID="6f51e05f-82f7-4011-8801-0d387547ba12" containerID="ca417e685d091de4c5ab7a18fcbd2a89955f4dd58f4c4207f508b03cd9fec72f" exitCode=0 Feb 23 09:13:33 crc kubenswrapper[4834]: I0223 09:13:33.021431 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b2sb8" event={"ID":"6f51e05f-82f7-4011-8801-0d387547ba12","Type":"ContainerDied","Data":"ca417e685d091de4c5ab7a18fcbd2a89955f4dd58f4c4207f508b03cd9fec72f"} Feb 23 09:13:33 crc kubenswrapper[4834]: I0223 09:13:33.021719 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b2sb8" event={"ID":"6f51e05f-82f7-4011-8801-0d387547ba12","Type":"ContainerStarted","Data":"8d7a550d36d1072fc47172b359e8c35cb0b24f982934f45bf53719bd6cd4a18e"} Feb 23 09:13:33 crc kubenswrapper[4834]: I0223 09:13:33.025651 4834 generic.go:334] "Generic (PLEG): container finished" podID="6e4ac398-147a-4179-b898-bf5e8df2e333" containerID="63bff007bc5ef3707ffcc6908143581bf6906e9f97724f662cab8a447711deb2" exitCode=0 Feb 23 09:13:33 crc kubenswrapper[4834]: I0223 09:13:33.025691 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lrv4x" event={"ID":"6e4ac398-147a-4179-b898-bf5e8df2e333","Type":"ContainerDied","Data":"63bff007bc5ef3707ffcc6908143581bf6906e9f97724f662cab8a447711deb2"} Feb 23 09:13:33 crc kubenswrapper[4834]: I0223 09:13:33.025717 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lrv4x" event={"ID":"6e4ac398-147a-4179-b898-bf5e8df2e333","Type":"ContainerStarted","Data":"e640fdaa797ba31ab1b44c3f2e54990388c0d505d78060d13c6985407817c3d9"} Feb 23 09:13:33 crc kubenswrapper[4834]: I0223 09:13:33.929293 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hzj74"] Feb 23 09:13:33 crc kubenswrapper[4834]: I0223 09:13:33.930511 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:33 crc kubenswrapper[4834]: I0223 09:13:33.934972 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Feb 23 09:13:33 crc kubenswrapper[4834]: I0223 09:13:33.939973 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hzj74"] Feb 23 09:13:33 crc kubenswrapper[4834]: I0223 09:13:33.991571 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98fa75d2-5267-47d5-9640-881c6f8ce155-utilities\") pod \"redhat-operators-hzj74\" (UID: \"98fa75d2-5267-47d5-9640-881c6f8ce155\") " pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:33 crc kubenswrapper[4834]: I0223 09:13:33.991638 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5bn8q\" (UniqueName: \"kubernetes.io/projected/98fa75d2-5267-47d5-9640-881c6f8ce155-kube-api-access-5bn8q\") pod \"redhat-operators-hzj74\" (UID: \"98fa75d2-5267-47d5-9640-881c6f8ce155\") " pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:33 crc kubenswrapper[4834]: I0223 09:13:33.991716 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98fa75d2-5267-47d5-9640-881c6f8ce155-catalog-content\") pod \"redhat-operators-hzj74\" (UID: \"98fa75d2-5267-47d5-9640-881c6f8ce155\") " pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.057752 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b2sb8" event={"ID":"6f51e05f-82f7-4011-8801-0d387547ba12","Type":"ContainerStarted","Data":"be0147266babcc6fbf70f395ead62383543389033bde260351c8585f51345ed4"} Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.092946 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5bn8q\" (UniqueName: \"kubernetes.io/projected/98fa75d2-5267-47d5-9640-881c6f8ce155-kube-api-access-5bn8q\") pod \"redhat-operators-hzj74\" (UID: \"98fa75d2-5267-47d5-9640-881c6f8ce155\") " pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.093043 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98fa75d2-5267-47d5-9640-881c6f8ce155-catalog-content\") pod \"redhat-operators-hzj74\" (UID: \"98fa75d2-5267-47d5-9640-881c6f8ce155\") " pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.093116 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98fa75d2-5267-47d5-9640-881c6f8ce155-utilities\") pod \"redhat-operators-hzj74\" (UID: \"98fa75d2-5267-47d5-9640-881c6f8ce155\") " pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.093585 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98fa75d2-5267-47d5-9640-881c6f8ce155-utilities\") pod \"redhat-operators-hzj74\" (UID: \"98fa75d2-5267-47d5-9640-881c6f8ce155\") " pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:34 crc 
kubenswrapper[4834]: I0223 09:13:34.093834 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98fa75d2-5267-47d5-9640-881c6f8ce155-catalog-content\") pod \"redhat-operators-hzj74\" (UID: \"98fa75d2-5267-47d5-9640-881c6f8ce155\") " pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.122129 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5bn8q\" (UniqueName: \"kubernetes.io/projected/98fa75d2-5267-47d5-9640-881c6f8ce155-kube-api-access-5bn8q\") pod \"redhat-operators-hzj74\" (UID: \"98fa75d2-5267-47d5-9640-881c6f8ce155\") " pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.131311 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6kd6m"] Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.132587 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.137094 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.143508 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6kd6m"] Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.193945 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b775dec4-0c14-4ea4-b513-01fee306aa41-catalog-content\") pod \"community-operators-6kd6m\" (UID: \"b775dec4-0c14-4ea4-b513-01fee306aa41\") " pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.193989 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xm75\" (UniqueName: \"kubernetes.io/projected/b775dec4-0c14-4ea4-b513-01fee306aa41-kube-api-access-6xm75\") pod \"community-operators-6kd6m\" (UID: \"b775dec4-0c14-4ea4-b513-01fee306aa41\") " pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.194022 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b775dec4-0c14-4ea4-b513-01fee306aa41-utilities\") pod \"community-operators-6kd6m\" (UID: \"b775dec4-0c14-4ea4-b513-01fee306aa41\") " pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.280426 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.295017 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b775dec4-0c14-4ea4-b513-01fee306aa41-catalog-content\") pod \"community-operators-6kd6m\" (UID: \"b775dec4-0c14-4ea4-b513-01fee306aa41\") " pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.295063 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xm75\" (UniqueName: \"kubernetes.io/projected/b775dec4-0c14-4ea4-b513-01fee306aa41-kube-api-access-6xm75\") pod \"community-operators-6kd6m\" (UID: \"b775dec4-0c14-4ea4-b513-01fee306aa41\") " pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.295107 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b775dec4-0c14-4ea4-b513-01fee306aa41-utilities\") pod \"community-operators-6kd6m\" (UID: \"b775dec4-0c14-4ea4-b513-01fee306aa41\") " pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.295559 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b775dec4-0c14-4ea4-b513-01fee306aa41-catalog-content\") pod \"community-operators-6kd6m\" (UID: \"b775dec4-0c14-4ea4-b513-01fee306aa41\") " pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.295634 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b775dec4-0c14-4ea4-b513-01fee306aa41-utilities\") pod \"community-operators-6kd6m\" (UID: \"b775dec4-0c14-4ea4-b513-01fee306aa41\") " pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.316794 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xm75\" (UniqueName: \"kubernetes.io/projected/b775dec4-0c14-4ea4-b513-01fee306aa41-kube-api-access-6xm75\") pod \"community-operators-6kd6m\" (UID: \"b775dec4-0c14-4ea4-b513-01fee306aa41\") " pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.461017 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hzj74"] Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.461173 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:34 crc kubenswrapper[4834]: W0223 09:13:34.468837 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98fa75d2_5267_47d5_9640_881c6f8ce155.slice/crio-f1bbc29a63b00c2d99bd6aeef12335e21d764d5e8824eda57e35138b39f1526a WatchSource:0}: Error finding container f1bbc29a63b00c2d99bd6aeef12335e21d764d5e8824eda57e35138b39f1526a: Status 404 returned error can't find the container with id f1bbc29a63b00c2d99bd6aeef12335e21d764d5e8824eda57e35138b39f1526a Feb 23 09:13:34 crc kubenswrapper[4834]: I0223 09:13:34.860538 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6kd6m"] Feb 23 09:13:35 crc kubenswrapper[4834]: I0223 09:13:35.064805 4834 generic.go:334] "Generic (PLEG): container finished" podID="6f51e05f-82f7-4011-8801-0d387547ba12" containerID="be0147266babcc6fbf70f395ead62383543389033bde260351c8585f51345ed4" exitCode=0 Feb 23 09:13:35 crc kubenswrapper[4834]: I0223 09:13:35.064883 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b2sb8" event={"ID":"6f51e05f-82f7-4011-8801-0d387547ba12","Type":"ContainerDied","Data":"be0147266babcc6fbf70f395ead62383543389033bde260351c8585f51345ed4"} Feb 23 09:13:35 crc kubenswrapper[4834]: I0223 09:13:35.066327 4834 generic.go:334] "Generic (PLEG): container finished" podID="98fa75d2-5267-47d5-9640-881c6f8ce155" containerID="39226e5b5105d56da1ff321efeff93a3d09958bde0a035d1ae20f4ee53403849" exitCode=0 Feb 23 09:13:35 crc kubenswrapper[4834]: I0223 09:13:35.066379 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hzj74" event={"ID":"98fa75d2-5267-47d5-9640-881c6f8ce155","Type":"ContainerDied","Data":"39226e5b5105d56da1ff321efeff93a3d09958bde0a035d1ae20f4ee53403849"} Feb 23 09:13:35 crc kubenswrapper[4834]: I0223 09:13:35.066412 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hzj74" event={"ID":"98fa75d2-5267-47d5-9640-881c6f8ce155","Type":"ContainerStarted","Data":"f1bbc29a63b00c2d99bd6aeef12335e21d764d5e8824eda57e35138b39f1526a"} Feb 23 09:13:35 crc kubenswrapper[4834]: I0223 09:13:35.071276 4834 generic.go:334] "Generic (PLEG): container finished" podID="6e4ac398-147a-4179-b898-bf5e8df2e333" containerID="cbf0452ff622958530b057a586cfe54799d2a7ef5727d127e512ed7f2c82b6e6" exitCode=0 Feb 23 09:13:35 crc kubenswrapper[4834]: I0223 09:13:35.071612 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lrv4x" event={"ID":"6e4ac398-147a-4179-b898-bf5e8df2e333","Type":"ContainerDied","Data":"cbf0452ff622958530b057a586cfe54799d2a7ef5727d127e512ed7f2c82b6e6"} Feb 23 09:13:35 crc kubenswrapper[4834]: I0223 09:13:35.074945 4834 generic.go:334] "Generic (PLEG): container finished" podID="b775dec4-0c14-4ea4-b513-01fee306aa41" containerID="4628961a88827f7cbc299929348d7b947401da9601bfb60b55cc60bf06d75cfc" exitCode=0 Feb 23 09:13:35 crc kubenswrapper[4834]: I0223 09:13:35.075138 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kd6m" event={"ID":"b775dec4-0c14-4ea4-b513-01fee306aa41","Type":"ContainerDied","Data":"4628961a88827f7cbc299929348d7b947401da9601bfb60b55cc60bf06d75cfc"} Feb 23 09:13:35 crc kubenswrapper[4834]: I0223 09:13:35.075260 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-6kd6m" event={"ID":"b775dec4-0c14-4ea4-b513-01fee306aa41","Type":"ContainerStarted","Data":"6352dc62936c0f311dc74d6c242dfe5fa7726894f4d5efccaa8a1f514d2a189f"} Feb 23 09:13:36 crc kubenswrapper[4834]: I0223 09:13:36.084953 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-lrv4x" event={"ID":"6e4ac398-147a-4179-b898-bf5e8df2e333","Type":"ContainerStarted","Data":"4c06de473dc7b70932c7f02ee3a7e3fa45a2b87e104fee372379bf58f3bffcd8"} Feb 23 09:13:36 crc kubenswrapper[4834]: I0223 09:13:36.109706 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-lrv4x" podStartSLOduration=2.202505448 podStartE2EDuration="5.109688491s" podCreationTimestamp="2026-02-23 09:13:31 +0000 UTC" firstStartedPulling="2026-02-23 09:13:33.02653911 +0000 UTC m=+349.104853497" lastFinishedPulling="2026-02-23 09:13:35.933722153 +0000 UTC m=+352.012036540" observedRunningTime="2026-02-23 09:13:36.107931443 +0000 UTC m=+352.186245840" watchObservedRunningTime="2026-02-23 09:13:36.109688491 +0000 UTC m=+352.188002878" Feb 23 09:13:37 crc kubenswrapper[4834]: I0223 09:13:37.091695 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hzj74" event={"ID":"98fa75d2-5267-47d5-9640-881c6f8ce155","Type":"ContainerStarted","Data":"26fa54f897bd5a5088e54bd8d15e595ee6aa844a70d99cfa7850681e8f3b310b"} Feb 23 09:13:37 crc kubenswrapper[4834]: I0223 09:13:37.093809 4834 generic.go:334] "Generic (PLEG): container finished" podID="b775dec4-0c14-4ea4-b513-01fee306aa41" containerID="e8ea6cd58225975a1dcae8bf5535fe9053fc48d33ee3f2073be985bc83803bfe" exitCode=0 Feb 23 09:13:37 crc kubenswrapper[4834]: I0223 09:13:37.093882 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kd6m" event={"ID":"b775dec4-0c14-4ea4-b513-01fee306aa41","Type":"ContainerDied","Data":"e8ea6cd58225975a1dcae8bf5535fe9053fc48d33ee3f2073be985bc83803bfe"} Feb 23 09:13:37 crc kubenswrapper[4834]: I0223 09:13:37.097688 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b2sb8" event={"ID":"6f51e05f-82f7-4011-8801-0d387547ba12","Type":"ContainerStarted","Data":"64e5718fc48a9a48a394e36021765835a6f346e08019ccb5428bbd44829b62bd"} Feb 23 09:13:37 crc kubenswrapper[4834]: I0223 09:13:37.131648 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-b2sb8" podStartSLOduration=2.618011211 podStartE2EDuration="6.131631514s" podCreationTimestamp="2026-02-23 09:13:31 +0000 UTC" firstStartedPulling="2026-02-23 09:13:33.024068441 +0000 UTC m=+349.102382828" lastFinishedPulling="2026-02-23 09:13:36.537688744 +0000 UTC m=+352.616003131" observedRunningTime="2026-02-23 09:13:37.125223135 +0000 UTC m=+353.203537522" watchObservedRunningTime="2026-02-23 09:13:37.131631514 +0000 UTC m=+353.209945901" Feb 23 09:13:38 crc kubenswrapper[4834]: I0223 09:13:38.103074 4834 generic.go:334] "Generic (PLEG): container finished" podID="98fa75d2-5267-47d5-9640-881c6f8ce155" containerID="26fa54f897bd5a5088e54bd8d15e595ee6aa844a70d99cfa7850681e8f3b310b" exitCode=0 Feb 23 09:13:38 crc kubenswrapper[4834]: I0223 09:13:38.103137 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hzj74" 
event={"ID":"98fa75d2-5267-47d5-9640-881c6f8ce155","Type":"ContainerDied","Data":"26fa54f897bd5a5088e54bd8d15e595ee6aa844a70d99cfa7850681e8f3b310b"} Feb 23 09:13:38 crc kubenswrapper[4834]: I0223 09:13:38.105924 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6kd6m" event={"ID":"b775dec4-0c14-4ea4-b513-01fee306aa41","Type":"ContainerStarted","Data":"44e69bd24e681a5412759b8253dbf53d66c93b270bfe9e989d557156a15b73f4"} Feb 23 09:13:38 crc kubenswrapper[4834]: I0223 09:13:38.141249 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6kd6m" podStartSLOduration=1.7272127510000002 podStartE2EDuration="4.141226901s" podCreationTimestamp="2026-02-23 09:13:34 +0000 UTC" firstStartedPulling="2026-02-23 09:13:35.078293205 +0000 UTC m=+351.156607602" lastFinishedPulling="2026-02-23 09:13:37.492307365 +0000 UTC m=+353.570621752" observedRunningTime="2026-02-23 09:13:38.136894791 +0000 UTC m=+354.215209188" watchObservedRunningTime="2026-02-23 09:13:38.141226901 +0000 UTC m=+354.219541288" Feb 23 09:13:39 crc kubenswrapper[4834]: I0223 09:13:39.114076 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hzj74" event={"ID":"98fa75d2-5267-47d5-9640-881c6f8ce155","Type":"ContainerStarted","Data":"f08fedc6233d2e8d9d520fcf940a2f3688d5edfbf5b820a9e7ce1cade07d3e23"} Feb 23 09:13:41 crc kubenswrapper[4834]: I0223 09:13:41.855273 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:41 crc kubenswrapper[4834]: I0223 09:13:41.855675 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:41 crc kubenswrapper[4834]: I0223 09:13:41.900275 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:41 crc kubenswrapper[4834]: I0223 09:13:41.917168 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hzj74" podStartSLOduration=5.49290337 podStartE2EDuration="8.917152286s" podCreationTimestamp="2026-02-23 09:13:33 +0000 UTC" firstStartedPulling="2026-02-23 09:13:35.071296939 +0000 UTC m=+351.149611326" lastFinishedPulling="2026-02-23 09:13:38.495545855 +0000 UTC m=+354.573860242" observedRunningTime="2026-02-23 09:13:39.1379952 +0000 UTC m=+355.216309587" watchObservedRunningTime="2026-02-23 09:13:41.917152286 +0000 UTC m=+357.995466673" Feb 23 09:13:42 crc kubenswrapper[4834]: I0223 09:13:42.044039 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:42 crc kubenswrapper[4834]: I0223 09:13:42.044089 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:42 crc kubenswrapper[4834]: I0223 09:13:42.084494 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:42 crc kubenswrapper[4834]: I0223 09:13:42.183959 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-b2sb8" Feb 23 09:13:42 crc kubenswrapper[4834]: I0223 09:13:42.184249 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/certified-operators-lrv4x" Feb 23 09:13:44 crc kubenswrapper[4834]: I0223 09:13:44.281256 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:44 crc kubenswrapper[4834]: I0223 09:13:44.281333 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:44 crc kubenswrapper[4834]: I0223 09:13:44.372198 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:44 crc kubenswrapper[4834]: I0223 09:13:44.462018 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:44 crc kubenswrapper[4834]: I0223 09:13:44.462058 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:44 crc kubenswrapper[4834]: I0223 09:13:44.515058 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:45 crc kubenswrapper[4834]: I0223 09:13:45.185382 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hzj74" Feb 23 09:13:45 crc kubenswrapper[4834]: I0223 09:13:45.190629 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6kd6m" Feb 23 09:13:46 crc kubenswrapper[4834]: I0223 09:13:46.765252 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-wrptv" Feb 23 09:13:46 crc kubenswrapper[4834]: I0223 09:13:46.830984 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rcwhk"] Feb 23 09:14:11 crc kubenswrapper[4834]: I0223 09:14:11.871680 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" podUID="022f3b26-adfd-4fb6-b5c8-4d363e57dc71" containerName="registry" containerID="cri-o://2e57c13d5fae95e69029148b8ab0761abae21c1ff8d2d4db18dbfb5a47ed88b2" gracePeriod=30 Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.238715 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.290128 4834 generic.go:334] "Generic (PLEG): container finished" podID="022f3b26-adfd-4fb6-b5c8-4d363e57dc71" containerID="2e57c13d5fae95e69029148b8ab0761abae21c1ff8d2d4db18dbfb5a47ed88b2" exitCode=0 Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.290162 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" event={"ID":"022f3b26-adfd-4fb6-b5c8-4d363e57dc71","Type":"ContainerDied","Data":"2e57c13d5fae95e69029148b8ab0761abae21c1ff8d2d4db18dbfb5a47ed88b2"} Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.290198 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" event={"ID":"022f3b26-adfd-4fb6-b5c8-4d363e57dc71","Type":"ContainerDied","Data":"dae63572e0de722f45a5664c39af71d29acd7c2ad65d4ab4cc65bc49233eeb7b"} Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.290215 4834 scope.go:117] "RemoveContainer" containerID="2e57c13d5fae95e69029148b8ab0761abae21c1ff8d2d4db18dbfb5a47ed88b2" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.290652 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-rcwhk" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.303336 4834 scope.go:117] "RemoveContainer" containerID="2e57c13d5fae95e69029148b8ab0761abae21c1ff8d2d4db18dbfb5a47ed88b2" Feb 23 09:14:12 crc kubenswrapper[4834]: E0223 09:14:12.303792 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e57c13d5fae95e69029148b8ab0761abae21c1ff8d2d4db18dbfb5a47ed88b2\": container with ID starting with 2e57c13d5fae95e69029148b8ab0761abae21c1ff8d2d4db18dbfb5a47ed88b2 not found: ID does not exist" containerID="2e57c13d5fae95e69029148b8ab0761abae21c1ff8d2d4db18dbfb5a47ed88b2" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.303821 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e57c13d5fae95e69029148b8ab0761abae21c1ff8d2d4db18dbfb5a47ed88b2"} err="failed to get container status \"2e57c13d5fae95e69029148b8ab0761abae21c1ff8d2d4db18dbfb5a47ed88b2\": rpc error: code = NotFound desc = could not find container \"2e57c13d5fae95e69029148b8ab0761abae21c1ff8d2d4db18dbfb5a47ed88b2\": container with ID starting with 2e57c13d5fae95e69029148b8ab0761abae21c1ff8d2d4db18dbfb5a47ed88b2 not found: ID does not exist" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.345292 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-bound-sa-token\") pod \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.345420 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-ca-trust-extracted\") pod \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.345453 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9bf46\" (UniqueName: 
\"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-kube-api-access-9bf46\") pod \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.345645 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-trusted-ca\") pod \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.345672 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-registry-certificates\") pod \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.345888 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.345949 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-registry-tls\") pod \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.345985 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-installation-pull-secrets\") pod \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\" (UID: \"022f3b26-adfd-4fb6-b5c8-4d363e57dc71\") " Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.348208 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "022f3b26-adfd-4fb6-b5c8-4d363e57dc71" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.348221 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "022f3b26-adfd-4fb6-b5c8-4d363e57dc71" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.353810 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "022f3b26-adfd-4fb6-b5c8-4d363e57dc71" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.357600 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "022f3b26-adfd-4fb6-b5c8-4d363e57dc71" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.357615 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "022f3b26-adfd-4fb6-b5c8-4d363e57dc71" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.358200 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "022f3b26-adfd-4fb6-b5c8-4d363e57dc71" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.359779 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-kube-api-access-9bf46" (OuterVolumeSpecName: "kube-api-access-9bf46") pod "022f3b26-adfd-4fb6-b5c8-4d363e57dc71" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71"). InnerVolumeSpecName "kube-api-access-9bf46". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.365448 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "022f3b26-adfd-4fb6-b5c8-4d363e57dc71" (UID: "022f3b26-adfd-4fb6-b5c8-4d363e57dc71"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.447200 4834 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.447253 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9bf46\" (UniqueName: \"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-kube-api-access-9bf46\") on node \"crc\" DevicePath \"\"" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.447272 4834 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-trusted-ca\") on node \"crc\" DevicePath \"\"" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.447283 4834 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-registry-certificates\") on node \"crc\" DevicePath \"\"" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.447295 4834 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-registry-tls\") on node \"crc\" DevicePath \"\"" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.447307 4834 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.447321 4834 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/022f3b26-adfd-4fb6-b5c8-4d363e57dc71-bound-sa-token\") on node \"crc\" DevicePath \"\"" Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.635515 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rcwhk"] Feb 23 09:14:12 crc kubenswrapper[4834]: I0223 09:14:12.640039 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-rcwhk"] Feb 23 09:14:14 crc kubenswrapper[4834]: I0223 09:14:14.598436 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="022f3b26-adfd-4fb6-b5c8-4d363e57dc71" path="/var/lib/kubelet/pods/022f3b26-adfd-4fb6-b5c8-4d363e57dc71/volumes" Feb 23 09:14:57 crc kubenswrapper[4834]: I0223 09:14:57.810192 4834 patch_prober.go:28] interesting pod/machine-config-daemon-kt9lp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 23 09:14:57 crc kubenswrapper[4834]: I0223 09:14:57.810829 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.180055 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549"] Feb 23 09:15:00 crc 
kubenswrapper[4834]: E0223 09:15:00.180238 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="022f3b26-adfd-4fb6-b5c8-4d363e57dc71" containerName="registry" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.180249 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="022f3b26-adfd-4fb6-b5c8-4d363e57dc71" containerName="registry" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.180346 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="022f3b26-adfd-4fb6-b5c8-4d363e57dc71" containerName="registry" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.180726 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.184262 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.184303 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.208948 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549"] Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.282040 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89316109-e91a-4e35-b311-0c7f9a4c133e-config-volume\") pod \"collect-profiles-29530635-tw549\" (UID: \"89316109-e91a-4e35-b311-0c7f9a4c133e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.282103 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-265gz\" (UniqueName: \"kubernetes.io/projected/89316109-e91a-4e35-b311-0c7f9a4c133e-kube-api-access-265gz\") pod \"collect-profiles-29530635-tw549\" (UID: \"89316109-e91a-4e35-b311-0c7f9a4c133e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.282165 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89316109-e91a-4e35-b311-0c7f9a4c133e-secret-volume\") pod \"collect-profiles-29530635-tw549\" (UID: \"89316109-e91a-4e35-b311-0c7f9a4c133e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.383313 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89316109-e91a-4e35-b311-0c7f9a4c133e-config-volume\") pod \"collect-profiles-29530635-tw549\" (UID: \"89316109-e91a-4e35-b311-0c7f9a4c133e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.383366 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-265gz\" (UniqueName: \"kubernetes.io/projected/89316109-e91a-4e35-b311-0c7f9a4c133e-kube-api-access-265gz\") pod \"collect-profiles-29530635-tw549\" (UID: \"89316109-e91a-4e35-b311-0c7f9a4c133e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" 
Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.383434 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89316109-e91a-4e35-b311-0c7f9a4c133e-secret-volume\") pod \"collect-profiles-29530635-tw549\" (UID: \"89316109-e91a-4e35-b311-0c7f9a4c133e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.384900 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89316109-e91a-4e35-b311-0c7f9a4c133e-config-volume\") pod \"collect-profiles-29530635-tw549\" (UID: \"89316109-e91a-4e35-b311-0c7f9a4c133e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.389872 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89316109-e91a-4e35-b311-0c7f9a4c133e-secret-volume\") pod \"collect-profiles-29530635-tw549\" (UID: \"89316109-e91a-4e35-b311-0c7f9a4c133e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.401888 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-265gz\" (UniqueName: \"kubernetes.io/projected/89316109-e91a-4e35-b311-0c7f9a4c133e-kube-api-access-265gz\") pod \"collect-profiles-29530635-tw549\" (UID: \"89316109-e91a-4e35-b311-0c7f9a4c133e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.556653 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" Feb 23 09:15:00 crc kubenswrapper[4834]: I0223 09:15:00.726257 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549"] Feb 23 09:15:01 crc kubenswrapper[4834]: I0223 09:15:01.583353 4834 generic.go:334] "Generic (PLEG): container finished" podID="89316109-e91a-4e35-b311-0c7f9a4c133e" containerID="2a614c98c935d99ef4457e2221a1a0595e3603cac03b10769e2a400fb216515f" exitCode=0 Feb 23 09:15:01 crc kubenswrapper[4834]: I0223 09:15:01.583635 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" event={"ID":"89316109-e91a-4e35-b311-0c7f9a4c133e","Type":"ContainerDied","Data":"2a614c98c935d99ef4457e2221a1a0595e3603cac03b10769e2a400fb216515f"} Feb 23 09:15:01 crc kubenswrapper[4834]: I0223 09:15:01.583718 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" event={"ID":"89316109-e91a-4e35-b311-0c7f9a4c133e","Type":"ContainerStarted","Data":"e89198ee853191c0968e0f7cd1733ebd417ea15cd7a7bc53beaed161d228417a"} Feb 23 09:15:02 crc kubenswrapper[4834]: I0223 09:15:02.795245 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" Feb 23 09:15:02 crc kubenswrapper[4834]: I0223 09:15:02.913958 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89316109-e91a-4e35-b311-0c7f9a4c133e-secret-volume\") pod \"89316109-e91a-4e35-b311-0c7f9a4c133e\" (UID: \"89316109-e91a-4e35-b311-0c7f9a4c133e\") " Feb 23 09:15:02 crc kubenswrapper[4834]: I0223 09:15:02.914052 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-265gz\" (UniqueName: \"kubernetes.io/projected/89316109-e91a-4e35-b311-0c7f9a4c133e-kube-api-access-265gz\") pod \"89316109-e91a-4e35-b311-0c7f9a4c133e\" (UID: \"89316109-e91a-4e35-b311-0c7f9a4c133e\") " Feb 23 09:15:02 crc kubenswrapper[4834]: I0223 09:15:02.914198 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89316109-e91a-4e35-b311-0c7f9a4c133e-config-volume\") pod \"89316109-e91a-4e35-b311-0c7f9a4c133e\" (UID: \"89316109-e91a-4e35-b311-0c7f9a4c133e\") " Feb 23 09:15:02 crc kubenswrapper[4834]: I0223 09:15:02.915148 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/89316109-e91a-4e35-b311-0c7f9a4c133e-config-volume" (OuterVolumeSpecName: "config-volume") pod "89316109-e91a-4e35-b311-0c7f9a4c133e" (UID: "89316109-e91a-4e35-b311-0c7f9a4c133e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:15:02 crc kubenswrapper[4834]: I0223 09:15:02.919302 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89316109-e91a-4e35-b311-0c7f9a4c133e-kube-api-access-265gz" (OuterVolumeSpecName: "kube-api-access-265gz") pod "89316109-e91a-4e35-b311-0c7f9a4c133e" (UID: "89316109-e91a-4e35-b311-0c7f9a4c133e"). InnerVolumeSpecName "kube-api-access-265gz". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:15:02 crc kubenswrapper[4834]: I0223 09:15:02.919341 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/89316109-e91a-4e35-b311-0c7f9a4c133e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "89316109-e91a-4e35-b311-0c7f9a4c133e" (UID: "89316109-e91a-4e35-b311-0c7f9a4c133e"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:15:03 crc kubenswrapper[4834]: I0223 09:15:03.015552 4834 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/89316109-e91a-4e35-b311-0c7f9a4c133e-secret-volume\") on node \"crc\" DevicePath \"\"" Feb 23 09:15:03 crc kubenswrapper[4834]: I0223 09:15:03.015599 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-265gz\" (UniqueName: \"kubernetes.io/projected/89316109-e91a-4e35-b311-0c7f9a4c133e-kube-api-access-265gz\") on node \"crc\" DevicePath \"\"" Feb 23 09:15:03 crc kubenswrapper[4834]: I0223 09:15:03.015611 4834 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/89316109-e91a-4e35-b311-0c7f9a4c133e-config-volume\") on node \"crc\" DevicePath \"\"" Feb 23 09:15:03 crc kubenswrapper[4834]: I0223 09:15:03.598123 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" event={"ID":"89316109-e91a-4e35-b311-0c7f9a4c133e","Type":"ContainerDied","Data":"e89198ee853191c0968e0f7cd1733ebd417ea15cd7a7bc53beaed161d228417a"} Feb 23 09:15:03 crc kubenswrapper[4834]: I0223 09:15:03.598177 4834 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e89198ee853191c0968e0f7cd1733ebd417ea15cd7a7bc53beaed161d228417a" Feb 23 09:15:03 crc kubenswrapper[4834]: I0223 09:15:03.598190 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29530635-tw549" Feb 23 09:15:27 crc kubenswrapper[4834]: I0223 09:15:27.810580 4834 patch_prober.go:28] interesting pod/machine-config-daemon-kt9lp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 23 09:15:27 crc kubenswrapper[4834]: I0223 09:15:27.811245 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 23 09:15:57 crc kubenswrapper[4834]: I0223 09:15:57.810195 4834 patch_prober.go:28] interesting pod/machine-config-daemon-kt9lp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 23 09:15:57 crc kubenswrapper[4834]: I0223 09:15:57.810876 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 23 09:15:57 crc kubenswrapper[4834]: I0223 09:15:57.810966 4834 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:15:57 crc kubenswrapper[4834]: I0223 09:15:57.812046 4834 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"88a28e626207b9996f70605163951b8b51203e8d24ce2c7e8948a63be9984191"} pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 23 09:15:57 crc kubenswrapper[4834]: I0223 09:15:57.812176 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" containerID="cri-o://88a28e626207b9996f70605163951b8b51203e8d24ce2c7e8948a63be9984191" gracePeriod=600 Feb 23 09:15:58 crc kubenswrapper[4834]: I0223 09:15:58.936927 4834 generic.go:334] "Generic (PLEG): container finished" podID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerID="88a28e626207b9996f70605163951b8b51203e8d24ce2c7e8948a63be9984191" exitCode=0 Feb 23 09:15:58 crc kubenswrapper[4834]: I0223 09:15:58.937057 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" event={"ID":"1172b9a5-71ca-49e9-a033-3b59c9c024a4","Type":"ContainerDied","Data":"88a28e626207b9996f70605163951b8b51203e8d24ce2c7e8948a63be9984191"} Feb 23 09:15:58 crc kubenswrapper[4834]: I0223 09:15:58.937486 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" event={"ID":"1172b9a5-71ca-49e9-a033-3b59c9c024a4","Type":"ContainerStarted","Data":"8b47de6c39b49990870479ce2e9821e6b671e9454136c5cd3f39336a30c7e515"} Feb 23 09:15:58 crc kubenswrapper[4834]: I0223 09:15:58.937530 4834 scope.go:117] "RemoveContainer" containerID="5f0104afd41d2190f8d4b34e88bbfed12c35766c1fe0c2a0e109ae44cbac3345" Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.846107 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-r9vjb"] Feb 23 09:16:46 crc kubenswrapper[4834]: E0223 09:16:46.848030 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89316109-e91a-4e35-b311-0c7f9a4c133e" containerName="collect-profiles" Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.848127 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="89316109-e91a-4e35-b311-0c7f9a4c133e" containerName="collect-profiles" Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.848323 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="89316109-e91a-4e35-b311-0c7f9a4c133e" containerName="collect-profiles" Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.848806 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-r9vjb" Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.854312 4834 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-kz5ln" Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.855080 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.855120 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.871940 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-r9vjb"] Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.880145 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-858654f9db-ls5wx"] Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.880801 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-ls5wx" Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.887644 4834 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-5wb8d" Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.889539 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-ls5wx"] Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.910109 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-bftlt"] Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.910785 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-bftlt" Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.912808 4834 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-qfkvx" Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.915686 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-bftlt"] Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.985145 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5x7k\" (UniqueName: \"kubernetes.io/projected/f1c43b16-4505-4745-8dd1-d3ccd4568121-kube-api-access-k5x7k\") pod \"cert-manager-858654f9db-ls5wx\" (UID: \"f1c43b16-4505-4745-8dd1-d3ccd4568121\") " pod="cert-manager/cert-manager-858654f9db-ls5wx" Feb 23 09:16:46 crc kubenswrapper[4834]: I0223 09:16:46.985475 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jc54r\" (UniqueName: \"kubernetes.io/projected/cfdb4ce6-358f-4dd9-91e3-d322429bb391-kube-api-access-jc54r\") pod \"cert-manager-cainjector-cf98fcc89-r9vjb\" (UID: \"cfdb4ce6-358f-4dd9-91e3-d322429bb391\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-r9vjb" Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 09:16:47.086589 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5x7k\" (UniqueName: \"kubernetes.io/projected/f1c43b16-4505-4745-8dd1-d3ccd4568121-kube-api-access-k5x7k\") pod \"cert-manager-858654f9db-ls5wx\" (UID: \"f1c43b16-4505-4745-8dd1-d3ccd4568121\") " pod="cert-manager/cert-manager-858654f9db-ls5wx" Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 
09:16:47.086650 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rl7lc\" (UniqueName: \"kubernetes.io/projected/14ec7de9-21a8-47f6-9ab9-bbce69d94ef8-kube-api-access-rl7lc\") pod \"cert-manager-webhook-687f57d79b-bftlt\" (UID: \"14ec7de9-21a8-47f6-9ab9-bbce69d94ef8\") " pod="cert-manager/cert-manager-webhook-687f57d79b-bftlt" Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 09:16:47.086676 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jc54r\" (UniqueName: \"kubernetes.io/projected/cfdb4ce6-358f-4dd9-91e3-d322429bb391-kube-api-access-jc54r\") pod \"cert-manager-cainjector-cf98fcc89-r9vjb\" (UID: \"cfdb4ce6-358f-4dd9-91e3-d322429bb391\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-r9vjb" Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 09:16:47.105814 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5x7k\" (UniqueName: \"kubernetes.io/projected/f1c43b16-4505-4745-8dd1-d3ccd4568121-kube-api-access-k5x7k\") pod \"cert-manager-858654f9db-ls5wx\" (UID: \"f1c43b16-4505-4745-8dd1-d3ccd4568121\") " pod="cert-manager/cert-manager-858654f9db-ls5wx" Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 09:16:47.105938 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jc54r\" (UniqueName: \"kubernetes.io/projected/cfdb4ce6-358f-4dd9-91e3-d322429bb391-kube-api-access-jc54r\") pod \"cert-manager-cainjector-cf98fcc89-r9vjb\" (UID: \"cfdb4ce6-358f-4dd9-91e3-d322429bb391\") " pod="cert-manager/cert-manager-cainjector-cf98fcc89-r9vjb" Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 09:16:47.169733 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-cf98fcc89-r9vjb" Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 09:16:47.187756 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rl7lc\" (UniqueName: \"kubernetes.io/projected/14ec7de9-21a8-47f6-9ab9-bbce69d94ef8-kube-api-access-rl7lc\") pod \"cert-manager-webhook-687f57d79b-bftlt\" (UID: \"14ec7de9-21a8-47f6-9ab9-bbce69d94ef8\") " pod="cert-manager/cert-manager-webhook-687f57d79b-bftlt" Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 09:16:47.196923 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-858654f9db-ls5wx" Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 09:16:47.210138 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rl7lc\" (UniqueName: \"kubernetes.io/projected/14ec7de9-21a8-47f6-9ab9-bbce69d94ef8-kube-api-access-rl7lc\") pod \"cert-manager-webhook-687f57d79b-bftlt\" (UID: \"14ec7de9-21a8-47f6-9ab9-bbce69d94ef8\") " pod="cert-manager/cert-manager-webhook-687f57d79b-bftlt" Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 09:16:47.226327 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-687f57d79b-bftlt" Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 09:16:47.410920 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-cf98fcc89-r9vjb"] Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 09:16:47.422070 4834 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 09:16:47.451102 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-r9vjb" event={"ID":"cfdb4ce6-358f-4dd9-91e3-d322429bb391","Type":"ContainerStarted","Data":"5a93f89266e77d5ba168927df1dd86073908615205d7f690edf6e6c016e9c2d1"} Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 09:16:47.452890 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-858654f9db-ls5wx"] Feb 23 09:16:47 crc kubenswrapper[4834]: W0223 09:16:47.459832 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf1c43b16_4505_4745_8dd1_d3ccd4568121.slice/crio-0aae7d57c5ffef83445d30cf1253286cd1acdabb7e539e666b12096291133e5a WatchSource:0}: Error finding container 0aae7d57c5ffef83445d30cf1253286cd1acdabb7e539e666b12096291133e5a: Status 404 returned error can't find the container with id 0aae7d57c5ffef83445d30cf1253286cd1acdabb7e539e666b12096291133e5a Feb 23 09:16:47 crc kubenswrapper[4834]: I0223 09:16:47.531855 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-687f57d79b-bftlt"] Feb 23 09:16:47 crc kubenswrapper[4834]: W0223 09:16:47.534890 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod14ec7de9_21a8_47f6_9ab9_bbce69d94ef8.slice/crio-b047777c7a6b5d263c97d1fffe64203363d30cc0b1be8ce84e304fda2a9b430b WatchSource:0}: Error finding container b047777c7a6b5d263c97d1fffe64203363d30cc0b1be8ce84e304fda2a9b430b: Status 404 returned error can't find the container with id b047777c7a6b5d263c97d1fffe64203363d30cc0b1be8ce84e304fda2a9b430b Feb 23 09:16:48 crc kubenswrapper[4834]: I0223 09:16:48.458524 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-bftlt" event={"ID":"14ec7de9-21a8-47f6-9ab9-bbce69d94ef8","Type":"ContainerStarted","Data":"b047777c7a6b5d263c97d1fffe64203363d30cc0b1be8ce84e304fda2a9b430b"} Feb 23 09:16:48 crc kubenswrapper[4834]: I0223 09:16:48.460437 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-ls5wx" event={"ID":"f1c43b16-4505-4745-8dd1-d3ccd4568121","Type":"ContainerStarted","Data":"0aae7d57c5ffef83445d30cf1253286cd1acdabb7e539e666b12096291133e5a"} Feb 23 09:16:51 crc kubenswrapper[4834]: I0223 09:16:51.479330 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-687f57d79b-bftlt" event={"ID":"14ec7de9-21a8-47f6-9ab9-bbce69d94ef8","Type":"ContainerStarted","Data":"b38ba9ed194a67ab85c261e5229d93a80ba5234f607eb5df700d47db22e22fbd"} Feb 23 09:16:51 crc kubenswrapper[4834]: I0223 09:16:51.480506 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-687f57d79b-bftlt" Feb 23 09:16:51 crc kubenswrapper[4834]: I0223 09:16:51.481599 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-cf98fcc89-r9vjb" 
event={"ID":"cfdb4ce6-358f-4dd9-91e3-d322429bb391","Type":"ContainerStarted","Data":"59acf3f5b25b43fa064eda10162d011e1f8e03c33f98f1cc4f555ff74420bb9c"} Feb 23 09:16:51 crc kubenswrapper[4834]: I0223 09:16:51.483856 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-858654f9db-ls5wx" event={"ID":"f1c43b16-4505-4745-8dd1-d3ccd4568121","Type":"ContainerStarted","Data":"c6188a5b6faae6031892d81b77675498cfca360fd195aa89a5eb25e7fb3766d9"} Feb 23 09:16:51 crc kubenswrapper[4834]: I0223 09:16:51.496254 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-687f57d79b-bftlt" podStartSLOduration=2.041339771 podStartE2EDuration="5.496225842s" podCreationTimestamp="2026-02-23 09:16:46 +0000 UTC" firstStartedPulling="2026-02-23 09:16:47.536867443 +0000 UTC m=+543.615181830" lastFinishedPulling="2026-02-23 09:16:50.991753514 +0000 UTC m=+547.070067901" observedRunningTime="2026-02-23 09:16:51.492618632 +0000 UTC m=+547.570933019" watchObservedRunningTime="2026-02-23 09:16:51.496225842 +0000 UTC m=+547.574540259" Feb 23 09:16:51 crc kubenswrapper[4834]: I0223 09:16:51.510827 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-cf98fcc89-r9vjb" podStartSLOduration=1.979781426 podStartE2EDuration="5.510805403s" podCreationTimestamp="2026-02-23 09:16:46 +0000 UTC" firstStartedPulling="2026-02-23 09:16:47.421799595 +0000 UTC m=+543.500113982" lastFinishedPulling="2026-02-23 09:16:50.952823572 +0000 UTC m=+547.031137959" observedRunningTime="2026-02-23 09:16:51.507158192 +0000 UTC m=+547.585472579" watchObservedRunningTime="2026-02-23 09:16:51.510805403 +0000 UTC m=+547.589119790" Feb 23 09:16:51 crc kubenswrapper[4834]: I0223 09:16:51.566119 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-858654f9db-ls5wx" podStartSLOduration=2.020591059 podStartE2EDuration="5.566089265s" podCreationTimestamp="2026-02-23 09:16:46 +0000 UTC" firstStartedPulling="2026-02-23 09:16:47.463315568 +0000 UTC m=+543.541629955" lastFinishedPulling="2026-02-23 09:16:51.008813774 +0000 UTC m=+547.087128161" observedRunningTime="2026-02-23 09:16:51.560660525 +0000 UTC m=+547.638974932" watchObservedRunningTime="2026-02-23 09:16:51.566089265 +0000 UTC m=+547.644403652" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.229608 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-687f57d79b-bftlt" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.425003 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-x2c4z"] Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.425722 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd" gracePeriod=30 Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.425796 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="northd" containerID="cri-o://5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023" gracePeriod=30 Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.425749 4834 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="kube-rbac-proxy-node" containerID="cri-o://1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9" gracePeriod=30 Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.425923 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="ovn-acl-logging" containerID="cri-o://ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e" gracePeriod=30 Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.425950 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="sbdb" containerID="cri-o://af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529" gracePeriod=30 Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.425687 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="ovn-controller" containerID="cri-o://ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649" gracePeriod=30 Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.425700 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="nbdb" containerID="cri-o://9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6" gracePeriod=30 Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.471835 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="ovnkube-controller" containerID="cri-o://094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6" gracePeriod=30 Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.521853 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-n556f_56fcafd6-2c67-4f14-a43e-8a6cd12f012e/kube-multus/0.log" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.521918 4834 generic.go:334] "Generic (PLEG): container finished" podID="56fcafd6-2c67-4f14-a43e-8a6cd12f012e" containerID="446599f5f5dc690f406caf10dc8da7f3834a353ad1fa1d37e4998a0bc247834d" exitCode=2 Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.521951 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-n556f" event={"ID":"56fcafd6-2c67-4f14-a43e-8a6cd12f012e","Type":"ContainerDied","Data":"446599f5f5dc690f406caf10dc8da7f3834a353ad1fa1d37e4998a0bc247834d"} Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.522380 4834 scope.go:117] "RemoveContainer" containerID="446599f5f5dc690f406caf10dc8da7f3834a353ad1fa1d37e4998a0bc247834d" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.774271 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-x2c4z_5102eeec-7776-42da-8027-c4e5f9c13450/ovn-acl-logging/0.log" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.775288 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-x2c4z_5102eeec-7776-42da-8027-c4e5f9c13450/ovn-controller/0.log" Feb 23 09:16:57 
crc kubenswrapper[4834]: I0223 09:16:57.776081 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.845960 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-p8vqs"] Feb 23 09:16:57 crc kubenswrapper[4834]: E0223 09:16:57.846230 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="kubecfg-setup" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846247 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="kubecfg-setup" Feb 23 09:16:57 crc kubenswrapper[4834]: E0223 09:16:57.846267 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="northd" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846279 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="northd" Feb 23 09:16:57 crc kubenswrapper[4834]: E0223 09:16:57.846295 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="sbdb" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846305 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="sbdb" Feb 23 09:16:57 crc kubenswrapper[4834]: E0223 09:16:57.846317 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="nbdb" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846327 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="nbdb" Feb 23 09:16:57 crc kubenswrapper[4834]: E0223 09:16:57.846342 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="ovn-controller" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846352 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="ovn-controller" Feb 23 09:16:57 crc kubenswrapper[4834]: E0223 09:16:57.846367 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="kube-rbac-proxy-node" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846378 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="kube-rbac-proxy-node" Feb 23 09:16:57 crc kubenswrapper[4834]: E0223 09:16:57.846392 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="ovnkube-controller" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846423 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="ovnkube-controller" Feb 23 09:16:57 crc kubenswrapper[4834]: E0223 09:16:57.846442 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="ovn-acl-logging" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846452 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="ovn-acl-logging" Feb 23 09:16:57 crc kubenswrapper[4834]: E0223 09:16:57.846466 4834 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="kube-rbac-proxy-ovn-metrics" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846475 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="kube-rbac-proxy-ovn-metrics" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846626 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="northd" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846647 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="ovnkube-controller" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846661 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="sbdb" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846674 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="ovn-acl-logging" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846684 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="nbdb" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846695 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="kube-rbac-proxy-node" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846704 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="ovn-controller" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.846718 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" containerName="kube-rbac-proxy-ovn-metrics" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.848896 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.903679 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-slash\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.903751 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-systemd-units\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.903821 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-slash" (OuterVolumeSpecName: "host-slash") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "host-slash". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.903830 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-ovnkube-config\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.903912 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5102eeec-7776-42da-8027-c4e5f9c13450-ovn-node-metrics-cert\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.903925 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.903952 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-var-lib-cni-networks-ovn-kubernetes\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.903998 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-ovnkube-script-lib\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904034 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-node-log\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904067 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-cni-bin\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904094 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-run-netns\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904105 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904122 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-cni-netd\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904154 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904195 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-openvswitch\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904250 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxvrj\" (UniqueName: \"kubernetes.io/projected/5102eeec-7776-42da-8027-c4e5f9c13450-kube-api-access-bxvrj\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904283 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-run-ovn-kubernetes\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904328 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-etc-openvswitch\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904352 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-systemd\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904373 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-kubelet\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904427 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904470 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904446 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-ovn\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904508 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-node-log" (OuterVolumeSpecName: "node-log") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904519 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-log-socket\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904555 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-var-lib-openvswitch\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904606 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-env-overrides\") pod \"5102eeec-7776-42da-8027-c4e5f9c13450\" (UID: \"5102eeec-7776-42da-8027-c4e5f9c13450\") " Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904551 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904582 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904652 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904711 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904716 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904740 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904750 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904780 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904808 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-log-socket" (OuterVolumeSpecName: "log-socket") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "log-socket". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904814 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-log-socket\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904906 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-systemd-units\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904948 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-run-ovn\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.904979 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-ovn-node-metrics-cert\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905057 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905098 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-node-log\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905174 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-run-systemd\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905223 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8lstq\" (UniqueName: \"kubernetes.io/projected/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-kube-api-access-8lstq\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905275 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: 
\"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-run-ovn-kubernetes\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905305 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-cni-netd\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905332 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-ovnkube-script-lib\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905350 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905366 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-env-overrides\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905422 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-ovnkube-config\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905486 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-cni-bin\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905539 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-slash\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905561 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-var-lib-openvswitch\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905585 4834 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-etc-openvswitch\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905617 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-kubelet\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905684 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-run-openvswitch\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905713 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-run-netns\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905763 4834 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905777 4834 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905791 4834 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-node-log\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905802 4834 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-cni-bin\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905815 4834 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-run-netns\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905827 4834 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-cni-netd\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905840 4834 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905852 4834 
reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905864 4834 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905875 4834 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-kubelet\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905887 4834 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-ovn\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905898 4834 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-log-socket\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905909 4834 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905921 4834 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-env-overrides\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905932 4834 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-host-slash\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905943 4834 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-systemd-units\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.905954 4834 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/5102eeec-7776-42da-8027-c4e5f9c13450-ovnkube-config\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.909348 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5102eeec-7776-42da-8027-c4e5f9c13450-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.911046 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5102eeec-7776-42da-8027-c4e5f9c13450-kube-api-access-bxvrj" (OuterVolumeSpecName: "kube-api-access-bxvrj") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "kube-api-access-bxvrj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:16:57 crc kubenswrapper[4834]: I0223 09:16:57.919154 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "5102eeec-7776-42da-8027-c4e5f9c13450" (UID: "5102eeec-7776-42da-8027-c4e5f9c13450"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.024901 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-env-overrides\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025337 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-ovnkube-config\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025392 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-cni-bin\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025497 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-slash\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025534 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-env-overrides\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025545 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-var-lib-openvswitch\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025590 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-cni-bin\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025610 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-etc-openvswitch\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 
crc kubenswrapper[4834]: I0223 09:16:58.025638 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-slash\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025660 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-kubelet\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025691 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-etc-openvswitch\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025713 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-kubelet\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025671 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-var-lib-openvswitch\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025766 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-run-openvswitch\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025812 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-run-netns\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025865 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-run-openvswitch\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025874 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-log-socket\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025910 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" 
(UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-run-netns\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025932 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-log-socket\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025932 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-systemd-units\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.025986 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-run-ovn\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026045 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-ovn-node-metrics-cert\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026117 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-run-ovn\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026202 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026281 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-node-log\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026329 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-run-systemd\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026373 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8lstq\" (UniqueName: \"kubernetes.io/projected/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-kube-api-access-8lstq\") 
pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026458 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-run-ovn-kubernetes\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026514 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-cni-netd\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026561 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-ovnkube-script-lib\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026624 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-node-log\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026639 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxvrj\" (UniqueName: \"kubernetes.io/projected/5102eeec-7776-42da-8027-c4e5f9c13450-kube-api-access-bxvrj\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026718 4834 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/5102eeec-7776-42da-8027-c4e5f9c13450-run-systemd\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026735 4834 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/5102eeec-7776-42da-8027-c4e5f9c13450-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026096 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-systemd-units\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026723 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-ovnkube-config\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026785 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-var-lib-cni-networks-ovn-kubernetes\") pod 
\"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026885 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-run-systemd\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.026958 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-run-ovn-kubernetes\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.027031 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-host-cni-netd\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.027893 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-ovnkube-script-lib\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.031087 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-ovn-node-metrics-cert\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.044516 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8lstq\" (UniqueName: \"kubernetes.io/projected/c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00-kube-api-access-8lstq\") pod \"ovnkube-node-p8vqs\" (UID: \"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00\") " pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.166658 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:16:58 crc kubenswrapper[4834]: W0223 09:16:58.183208 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc752b9b6_b6e7_48f1_b2aa_9d4c7187ff00.slice/crio-e84a99b73e8adb487d5727c03e0b2551122cb7cd8c8968ba56d1416ddc240f0a WatchSource:0}: Error finding container e84a99b73e8adb487d5727c03e0b2551122cb7cd8c8968ba56d1416ddc240f0a: Status 404 returned error can't find the container with id e84a99b73e8adb487d5727c03e0b2551122cb7cd8c8968ba56d1416ddc240f0a Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.528300 4834 generic.go:334] "Generic (PLEG): container finished" podID="c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00" containerID="7fe291440683c009d6dad0e96a1cdb1ed37d2bcaaaf9f427d4618c038ef0ff9d" exitCode=0 Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.528419 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" event={"ID":"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00","Type":"ContainerDied","Data":"7fe291440683c009d6dad0e96a1cdb1ed37d2bcaaaf9f427d4618c038ef0ff9d"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.528467 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" event={"ID":"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00","Type":"ContainerStarted","Data":"e84a99b73e8adb487d5727c03e0b2551122cb7cd8c8968ba56d1416ddc240f0a"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.532069 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-n556f_56fcafd6-2c67-4f14-a43e-8a6cd12f012e/kube-multus/0.log" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.532156 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-n556f" event={"ID":"56fcafd6-2c67-4f14-a43e-8a6cd12f012e","Type":"ContainerStarted","Data":"71073c99210318fbbbfc84f75e467fc45bb1d44da114887d67669c7c9b96eac1"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.535684 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-x2c4z_5102eeec-7776-42da-8027-c4e5f9c13450/ovn-acl-logging/0.log" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536128 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-x2c4z_5102eeec-7776-42da-8027-c4e5f9c13450/ovn-controller/0.log" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536495 4834 generic.go:334] "Generic (PLEG): container finished" podID="5102eeec-7776-42da-8027-c4e5f9c13450" containerID="094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6" exitCode=0 Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536520 4834 generic.go:334] "Generic (PLEG): container finished" podID="5102eeec-7776-42da-8027-c4e5f9c13450" containerID="af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529" exitCode=0 Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536529 4834 generic.go:334] "Generic (PLEG): container finished" podID="5102eeec-7776-42da-8027-c4e5f9c13450" containerID="9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6" exitCode=0 Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536537 4834 generic.go:334] "Generic (PLEG): container finished" podID="5102eeec-7776-42da-8027-c4e5f9c13450" containerID="5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023" exitCode=0 Feb 23 09:16:58 crc kubenswrapper[4834]: 
I0223 09:16:58.536545 4834 generic.go:334] "Generic (PLEG): container finished" podID="5102eeec-7776-42da-8027-c4e5f9c13450" containerID="81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd" exitCode=0 Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536552 4834 generic.go:334] "Generic (PLEG): container finished" podID="5102eeec-7776-42da-8027-c4e5f9c13450" containerID="1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9" exitCode=0 Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536558 4834 generic.go:334] "Generic (PLEG): container finished" podID="5102eeec-7776-42da-8027-c4e5f9c13450" containerID="ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e" exitCode=143 Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536565 4834 generic.go:334] "Generic (PLEG): container finished" podID="5102eeec-7776-42da-8027-c4e5f9c13450" containerID="ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649" exitCode=143 Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536584 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerDied","Data":"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536625 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536633 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerDied","Data":"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536660 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerDied","Data":"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536670 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerDied","Data":"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536682 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerDied","Data":"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536691 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerDied","Data":"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536701 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536711 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536712 4834 scope.go:117] "RemoveContainer" containerID="094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536717 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536823 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerDied","Data":"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536847 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536862 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536869 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536875 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536880 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536885 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536890 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536895 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536900 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536907 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerDied","Data":"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536915 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536921 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536926 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536931 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536936 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536941 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536946 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536956 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536977 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536987 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-x2c4z" event={"ID":"5102eeec-7776-42da-8027-c4e5f9c13450","Type":"ContainerDied","Data":"8b1dbfd6a122f81ac3ef84ab31dc4c4371a67d615068efe958d96b78731381f2"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.536998 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.537006 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.537014 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.537020 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.537026 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.537031 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.537053 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.537058 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.537064 4834 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f"} Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.601339 4834 scope.go:117] "RemoveContainer" containerID="af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.627935 4834 scope.go:117] "RemoveContainer" containerID="9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.641301 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-x2c4z"] Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.651370 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-x2c4z"] Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.652085 4834 scope.go:117] "RemoveContainer" containerID="5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.667812 4834 scope.go:117] "RemoveContainer" containerID="81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.680632 4834 scope.go:117] "RemoveContainer" containerID="1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.695008 4834 scope.go:117] "RemoveContainer" containerID="ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.722255 4834 scope.go:117] "RemoveContainer" containerID="ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.751296 4834 scope.go:117] "RemoveContainer" containerID="4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.765012 4834 scope.go:117] "RemoveContainer" containerID="094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6" Feb 23 09:16:58 crc kubenswrapper[4834]: E0223 09:16:58.765662 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6\": container with ID starting with 094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6 not found: ID does not exist" containerID="094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6" Feb 23 09:16:58 crc kubenswrapper[4834]: 
I0223 09:16:58.765700 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6"} err="failed to get container status \"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6\": rpc error: code = NotFound desc = could not find container \"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6\": container with ID starting with 094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.765725 4834 scope.go:117] "RemoveContainer" containerID="af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529" Feb 23 09:16:58 crc kubenswrapper[4834]: E0223 09:16:58.766082 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529\": container with ID starting with af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529 not found: ID does not exist" containerID="af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.766122 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529"} err="failed to get container status \"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529\": rpc error: code = NotFound desc = could not find container \"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529\": container with ID starting with af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.766150 4834 scope.go:117] "RemoveContainer" containerID="9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6" Feb 23 09:16:58 crc kubenswrapper[4834]: E0223 09:16:58.766621 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6\": container with ID starting with 9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6 not found: ID does not exist" containerID="9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.766669 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6"} err="failed to get container status \"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6\": rpc error: code = NotFound desc = could not find container \"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6\": container with ID starting with 9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.766696 4834 scope.go:117] "RemoveContainer" containerID="5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023" Feb 23 09:16:58 crc kubenswrapper[4834]: E0223 09:16:58.767063 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023\": container with ID starting with 
5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023 not found: ID does not exist" containerID="5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.767085 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023"} err="failed to get container status \"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023\": rpc error: code = NotFound desc = could not find container \"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023\": container with ID starting with 5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.767099 4834 scope.go:117] "RemoveContainer" containerID="81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd" Feb 23 09:16:58 crc kubenswrapper[4834]: E0223 09:16:58.767458 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd\": container with ID starting with 81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd not found: ID does not exist" containerID="81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.767493 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd"} err="failed to get container status \"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd\": rpc error: code = NotFound desc = could not find container \"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd\": container with ID starting with 81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.767541 4834 scope.go:117] "RemoveContainer" containerID="1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9" Feb 23 09:16:58 crc kubenswrapper[4834]: E0223 09:16:58.767897 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9\": container with ID starting with 1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9 not found: ID does not exist" containerID="1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.767958 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9"} err="failed to get container status \"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9\": rpc error: code = NotFound desc = could not find container \"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9\": container with ID starting with 1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.768012 4834 scope.go:117] "RemoveContainer" containerID="ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e" Feb 23 09:16:58 crc kubenswrapper[4834]: E0223 09:16:58.768477 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc 
error: code = NotFound desc = could not find container \"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e\": container with ID starting with ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e not found: ID does not exist" containerID="ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.768507 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e"} err="failed to get container status \"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e\": rpc error: code = NotFound desc = could not find container \"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e\": container with ID starting with ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.768525 4834 scope.go:117] "RemoveContainer" containerID="ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649" Feb 23 09:16:58 crc kubenswrapper[4834]: E0223 09:16:58.768822 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649\": container with ID starting with ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649 not found: ID does not exist" containerID="ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.768852 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649"} err="failed to get container status \"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649\": rpc error: code = NotFound desc = could not find container \"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649\": container with ID starting with ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.768873 4834 scope.go:117] "RemoveContainer" containerID="4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f" Feb 23 09:16:58 crc kubenswrapper[4834]: E0223 09:16:58.769128 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\": container with ID starting with 4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f not found: ID does not exist" containerID="4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.769158 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f"} err="failed to get container status \"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\": rpc error: code = NotFound desc = could not find container \"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\": container with ID starting with 4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.769178 4834 scope.go:117] "RemoveContainer" 
containerID="094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.769528 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6"} err="failed to get container status \"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6\": rpc error: code = NotFound desc = could not find container \"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6\": container with ID starting with 094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.769553 4834 scope.go:117] "RemoveContainer" containerID="af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.769813 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529"} err="failed to get container status \"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529\": rpc error: code = NotFound desc = could not find container \"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529\": container with ID starting with af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.769832 4834 scope.go:117] "RemoveContainer" containerID="9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.770176 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6"} err="failed to get container status \"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6\": rpc error: code = NotFound desc = could not find container \"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6\": container with ID starting with 9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.770207 4834 scope.go:117] "RemoveContainer" containerID="5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.770569 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023"} err="failed to get container status \"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023\": rpc error: code = NotFound desc = could not find container \"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023\": container with ID starting with 5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.770599 4834 scope.go:117] "RemoveContainer" containerID="81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.770869 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd"} err="failed to get container status \"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd\": rpc error: code = NotFound desc = could not find 
container \"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd\": container with ID starting with 81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.770887 4834 scope.go:117] "RemoveContainer" containerID="1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.771162 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9"} err="failed to get container status \"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9\": rpc error: code = NotFound desc = could not find container \"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9\": container with ID starting with 1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.771219 4834 scope.go:117] "RemoveContainer" containerID="ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.771736 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e"} err="failed to get container status \"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e\": rpc error: code = NotFound desc = could not find container \"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e\": container with ID starting with ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.771759 4834 scope.go:117] "RemoveContainer" containerID="ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.772121 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649"} err="failed to get container status \"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649\": rpc error: code = NotFound desc = could not find container \"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649\": container with ID starting with ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.772147 4834 scope.go:117] "RemoveContainer" containerID="4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.772383 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f"} err="failed to get container status \"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\": rpc error: code = NotFound desc = could not find container \"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\": container with ID starting with 4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.772461 4834 scope.go:117] "RemoveContainer" containerID="094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.772862 4834 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6"} err="failed to get container status \"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6\": rpc error: code = NotFound desc = could not find container \"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6\": container with ID starting with 094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.772881 4834 scope.go:117] "RemoveContainer" containerID="af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.773216 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529"} err="failed to get container status \"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529\": rpc error: code = NotFound desc = could not find container \"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529\": container with ID starting with af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.773237 4834 scope.go:117] "RemoveContainer" containerID="9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.773575 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6"} err="failed to get container status \"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6\": rpc error: code = NotFound desc = could not find container \"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6\": container with ID starting with 9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.773602 4834 scope.go:117] "RemoveContainer" containerID="5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.773941 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023"} err="failed to get container status \"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023\": rpc error: code = NotFound desc = could not find container \"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023\": container with ID starting with 5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.773966 4834 scope.go:117] "RemoveContainer" containerID="81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.774260 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd"} err="failed to get container status \"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd\": rpc error: code = NotFound desc = could not find container \"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd\": container with ID starting with 
81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.774284 4834 scope.go:117] "RemoveContainer" containerID="1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.774579 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9"} err="failed to get container status \"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9\": rpc error: code = NotFound desc = could not find container \"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9\": container with ID starting with 1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.774602 4834 scope.go:117] "RemoveContainer" containerID="ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.774872 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e"} err="failed to get container status \"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e\": rpc error: code = NotFound desc = could not find container \"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e\": container with ID starting with ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.774893 4834 scope.go:117] "RemoveContainer" containerID="ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.775189 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649"} err="failed to get container status \"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649\": rpc error: code = NotFound desc = could not find container \"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649\": container with ID starting with ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.775209 4834 scope.go:117] "RemoveContainer" containerID="4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.775512 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f"} err="failed to get container status \"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\": rpc error: code = NotFound desc = could not find container \"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\": container with ID starting with 4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.775534 4834 scope.go:117] "RemoveContainer" containerID="094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.775995 4834 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6"} err="failed to get container status \"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6\": rpc error: code = NotFound desc = could not find container \"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6\": container with ID starting with 094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.776022 4834 scope.go:117] "RemoveContainer" containerID="af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.776267 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529"} err="failed to get container status \"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529\": rpc error: code = NotFound desc = could not find container \"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529\": container with ID starting with af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.776289 4834 scope.go:117] "RemoveContainer" containerID="9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.776537 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6"} err="failed to get container status \"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6\": rpc error: code = NotFound desc = could not find container \"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6\": container with ID starting with 9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.776560 4834 scope.go:117] "RemoveContainer" containerID="5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.776847 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023"} err="failed to get container status \"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023\": rpc error: code = NotFound desc = could not find container \"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023\": container with ID starting with 5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.776876 4834 scope.go:117] "RemoveContainer" containerID="81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.777264 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd"} err="failed to get container status \"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd\": rpc error: code = NotFound desc = could not find container \"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd\": container with ID starting with 81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd not found: ID does not exist" Feb 
23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.777297 4834 scope.go:117] "RemoveContainer" containerID="1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.777586 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9"} err="failed to get container status \"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9\": rpc error: code = NotFound desc = could not find container \"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9\": container with ID starting with 1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.777613 4834 scope.go:117] "RemoveContainer" containerID="ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.777870 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e"} err="failed to get container status \"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e\": rpc error: code = NotFound desc = could not find container \"ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e\": container with ID starting with ceb45b78fa43793ec1fea9bc280a99c855bb681d196d98a5fb0ffac3bd36d14e not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.777899 4834 scope.go:117] "RemoveContainer" containerID="ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.778191 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649"} err="failed to get container status \"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649\": rpc error: code = NotFound desc = could not find container \"ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649\": container with ID starting with ddc9e3d242aa66fcd1715843d2dfb786bd2fdc2b73d0a36bc8a55d86d87fd649 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.778223 4834 scope.go:117] "RemoveContainer" containerID="4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.778513 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f"} err="failed to get container status \"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\": rpc error: code = NotFound desc = could not find container \"4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f\": container with ID starting with 4ada780e8866753bdcdd65762c2ef6f16a812260b4ac3c9134fec68aea237b0f not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.778597 4834 scope.go:117] "RemoveContainer" containerID="094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.778859 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6"} err="failed to get container status 
\"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6\": rpc error: code = NotFound desc = could not find container \"094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6\": container with ID starting with 094905851c9b94f1a6abba94ad0705a087d453059840cc766e2eb9d7ac2d4ca6 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.778892 4834 scope.go:117] "RemoveContainer" containerID="af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.779229 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529"} err="failed to get container status \"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529\": rpc error: code = NotFound desc = could not find container \"af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529\": container with ID starting with af4a5049c015ed981c054618eeba08032221e1e076347be39624a5b536b4e529 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.779258 4834 scope.go:117] "RemoveContainer" containerID="9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.779490 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6"} err="failed to get container status \"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6\": rpc error: code = NotFound desc = could not find container \"9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6\": container with ID starting with 9e71c8f231e95758846dd55c7d1551f270984827a809edb79aabfee80c7f73d6 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.779520 4834 scope.go:117] "RemoveContainer" containerID="5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.780036 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023"} err="failed to get container status \"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023\": rpc error: code = NotFound desc = could not find container \"5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023\": container with ID starting with 5c23531556f88c45e6f9c2a160a435ef34d256c8d88b22d9d8dc3de45730f023 not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.780092 4834 scope.go:117] "RemoveContainer" containerID="81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.780378 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd"} err="failed to get container status \"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd\": rpc error: code = NotFound desc = could not find container \"81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd\": container with ID starting with 81349a9b9551c2a230813b79c9f04c48adaaa6239c54755ddaa1ae84a37c6edd not found: ID does not exist" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.780417 4834 scope.go:117] "RemoveContainer" 
containerID="1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9" Feb 23 09:16:58 crc kubenswrapper[4834]: I0223 09:16:58.780752 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9"} err="failed to get container status \"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9\": rpc error: code = NotFound desc = could not find container \"1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9\": container with ID starting with 1b81b69ac4d71459c44b2840fdbbea2c4976ada02a28cc4685af93776afed3c9 not found: ID does not exist" Feb 23 09:16:59 crc kubenswrapper[4834]: I0223 09:16:59.545458 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" event={"ID":"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00","Type":"ContainerStarted","Data":"07255cdb457d2ca7dbfd2e09ee22bccef76a8161cbbe856f823bd238df6203e9"} Feb 23 09:16:59 crc kubenswrapper[4834]: I0223 09:16:59.545504 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" event={"ID":"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00","Type":"ContainerStarted","Data":"b7c860c3163edd2b799520e6c89e76211647f8fbf76046cf268e760f9017d6a7"} Feb 23 09:16:59 crc kubenswrapper[4834]: I0223 09:16:59.545519 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" event={"ID":"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00","Type":"ContainerStarted","Data":"fac09e69ea8cc6dc3c4fcf96b31ff1e070659b0c07d5cb2fcba007298590604c"} Feb 23 09:16:59 crc kubenswrapper[4834]: I0223 09:16:59.545530 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" event={"ID":"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00","Type":"ContainerStarted","Data":"fcd36876e034c686572551666a7ee1cf0ba7263da945dd1c8585249cc688b349"} Feb 23 09:16:59 crc kubenswrapper[4834]: I0223 09:16:59.545542 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" event={"ID":"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00","Type":"ContainerStarted","Data":"bf71d315293576b44ea4ca1478ee44a3db3991793bc9e679a506702f6c5e7a12"} Feb 23 09:16:59 crc kubenswrapper[4834]: I0223 09:16:59.545552 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" event={"ID":"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00","Type":"ContainerStarted","Data":"8e44150c3692f9a390f3a05061da63215d46695e0b123db8e62feca5316812a0"} Feb 23 09:17:00 crc kubenswrapper[4834]: I0223 09:17:00.669589 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5102eeec-7776-42da-8027-c4e5f9c13450" path="/var/lib/kubelet/pods/5102eeec-7776-42da-8027-c4e5f9c13450/volumes" Feb 23 09:17:01 crc kubenswrapper[4834]: I0223 09:17:01.674776 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" event={"ID":"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00","Type":"ContainerStarted","Data":"5a86fc271dd51a13f93c6e6a4362023ebc6fca81a8a6ae4824410a6d6a96ce2d"} Feb 23 09:17:03 crc kubenswrapper[4834]: I0223 09:17:03.694081 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" event={"ID":"c752b9b6-b6e7-48f1-b2aa-9d4c7187ff00","Type":"ContainerStarted","Data":"24415f110815fac8e2c8ef4104ad828cc4c0490c36d44f4fc53f1db09b0edae6"} Feb 23 09:17:03 crc kubenswrapper[4834]: I0223 09:17:03.694512 4834 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:17:03 crc kubenswrapper[4834]: I0223 09:17:03.694530 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:17:03 crc kubenswrapper[4834]: I0223 09:17:03.741740 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:17:03 crc kubenswrapper[4834]: I0223 09:17:03.773747 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" podStartSLOduration=6.773727618 podStartE2EDuration="6.773727618s" podCreationTimestamp="2026-02-23 09:16:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-02-23 09:17:03.735914477 +0000 UTC m=+559.814228904" watchObservedRunningTime="2026-02-23 09:17:03.773727618 +0000 UTC m=+559.852042005" Feb 23 09:17:04 crc kubenswrapper[4834]: I0223 09:17:04.700054 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:17:04 crc kubenswrapper[4834]: I0223 09:17:04.729039 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.410129 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceph"] Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.412165 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceph" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.414279 4834 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-gchp9" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.414998 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.415364 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.585982 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data\" (UniqueName: \"kubernetes.io/empty-dir/034328e2-f765-41c2-bf18-c25cde36414e-data\") pod \"ceph\" (UID: \"034328e2-f765-41c2-bf18-c25cde36414e\") " pod="openstack/ceph" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.586033 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run\" (UniqueName: \"kubernetes.io/empty-dir/034328e2-f765-41c2-bf18-c25cde36414e-run\") pod \"ceph\" (UID: \"034328e2-f765-41c2-bf18-c25cde36414e\") " pod="openstack/ceph" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.586174 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8j4wd\" (UniqueName: \"kubernetes.io/projected/034328e2-f765-41c2-bf18-c25cde36414e-kube-api-access-8j4wd\") pod \"ceph\" (UID: \"034328e2-f765-41c2-bf18-c25cde36414e\") " pod="openstack/ceph" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.586206 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log\" (UniqueName: \"kubernetes.io/empty-dir/034328e2-f765-41c2-bf18-c25cde36414e-log\") pod \"ceph\" (UID: 
\"034328e2-f765-41c2-bf18-c25cde36414e\") " pod="openstack/ceph" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.687388 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data\" (UniqueName: \"kubernetes.io/empty-dir/034328e2-f765-41c2-bf18-c25cde36414e-data\") pod \"ceph\" (UID: \"034328e2-f765-41c2-bf18-c25cde36414e\") " pod="openstack/ceph" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.687484 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run\" (UniqueName: \"kubernetes.io/empty-dir/034328e2-f765-41c2-bf18-c25cde36414e-run\") pod \"ceph\" (UID: \"034328e2-f765-41c2-bf18-c25cde36414e\") " pod="openstack/ceph" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.687530 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8j4wd\" (UniqueName: \"kubernetes.io/projected/034328e2-f765-41c2-bf18-c25cde36414e-kube-api-access-8j4wd\") pod \"ceph\" (UID: \"034328e2-f765-41c2-bf18-c25cde36414e\") " pod="openstack/ceph" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.687573 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log\" (UniqueName: \"kubernetes.io/empty-dir/034328e2-f765-41c2-bf18-c25cde36414e-log\") pod \"ceph\" (UID: \"034328e2-f765-41c2-bf18-c25cde36414e\") " pod="openstack/ceph" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.688291 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data\" (UniqueName: \"kubernetes.io/empty-dir/034328e2-f765-41c2-bf18-c25cde36414e-data\") pod \"ceph\" (UID: \"034328e2-f765-41c2-bf18-c25cde36414e\") " pod="openstack/ceph" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.688511 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log\" (UniqueName: \"kubernetes.io/empty-dir/034328e2-f765-41c2-bf18-c25cde36414e-log\") pod \"ceph\" (UID: \"034328e2-f765-41c2-bf18-c25cde36414e\") " pod="openstack/ceph" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.688642 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run\" (UniqueName: \"kubernetes.io/empty-dir/034328e2-f765-41c2-bf18-c25cde36414e-run\") pod \"ceph\" (UID: \"034328e2-f765-41c2-bf18-c25cde36414e\") " pod="openstack/ceph" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.726832 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8j4wd\" (UniqueName: \"kubernetes.io/projected/034328e2-f765-41c2-bf18-c25cde36414e-kube-api-access-8j4wd\") pod \"ceph\" (UID: \"034328e2-f765-41c2-bf18-c25cde36414e\") " pod="openstack/ceph" Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.735430 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceph" Feb 23 09:17:18 crc kubenswrapper[4834]: W0223 09:17:18.763031 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod034328e2_f765_41c2_bf18_c25cde36414e.slice/crio-f7d91164398fc6ed2f890e705682a4ec04616f9d0dd237d6d70cc5e50317c46d WatchSource:0}: Error finding container f7d91164398fc6ed2f890e705682a4ec04616f9d0dd237d6d70cc5e50317c46d: Status 404 returned error can't find the container with id f7d91164398fc6ed2f890e705682a4ec04616f9d0dd237d6d70cc5e50317c46d Feb 23 09:17:18 crc kubenswrapper[4834]: I0223 09:17:18.806421 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph" event={"ID":"034328e2-f765-41c2-bf18-c25cde36414e","Type":"ContainerStarted","Data":"f7d91164398fc6ed2f890e705682a4ec04616f9d0dd237d6d70cc5e50317c46d"} Feb 23 09:17:18 crc kubenswrapper[4834]: E0223 09:17:18.816319 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:18 crc kubenswrapper[4834]: E0223 09:17:18.830738 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:19 crc kubenswrapper[4834]: E0223 09:17:19.973099 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:19 crc kubenswrapper[4834]: E0223 09:17:19.986744 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:21 crc kubenswrapper[4834]: E0223 09:17:21.114425 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:21 crc kubenswrapper[4834]: E0223 09:17:21.127622 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:22 crc kubenswrapper[4834]: E0223 09:17:22.263443 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:22 crc kubenswrapper[4834]: E0223 09:17:22.276375 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:23 crc kubenswrapper[4834]: E0223 09:17:23.465561 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate 
SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:23 crc kubenswrapper[4834]: E0223 09:17:23.484555 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:24 crc kubenswrapper[4834]: E0223 09:17:24.632149 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:24 crc kubenswrapper[4834]: E0223 09:17:24.645176 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:25 crc kubenswrapper[4834]: E0223 09:17:25.784218 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:25 crc kubenswrapper[4834]: E0223 09:17:25.796951 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:26 crc kubenswrapper[4834]: E0223 09:17:26.941381 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:26 crc kubenswrapper[4834]: E0223 09:17:26.956756 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:28 crc kubenswrapper[4834]: E0223 09:17:28.107420 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:28 crc kubenswrapper[4834]: E0223 09:17:28.123229 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:28 crc kubenswrapper[4834]: I0223 09:17:28.193237 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-p8vqs" Feb 23 09:17:29 crc kubenswrapper[4834]: E0223 09:17:29.260791 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:29 crc kubenswrapper[4834]: E0223 09:17:29.272808 4834 
server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:30 crc kubenswrapper[4834]: E0223 09:17:30.414334 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:30 crc kubenswrapper[4834]: E0223 09:17:30.425647 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:31 crc kubenswrapper[4834]: E0223 09:17:31.578623 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:31 crc kubenswrapper[4834]: E0223 09:17:31.593470 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:32 crc kubenswrapper[4834]: E0223 09:17:32.739468 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:32 crc kubenswrapper[4834]: E0223 09:17:32.751792 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:33 crc kubenswrapper[4834]: E0223 09:17:33.890152 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:33 crc kubenswrapper[4834]: E0223 09:17:33.908453 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:34 crc kubenswrapper[4834]: I0223 09:17:34.904083 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceph" event={"ID":"034328e2-f765-41c2-bf18-c25cde36414e","Type":"ContainerStarted","Data":"f9cd5e53d4829729dea4fc47372106e287e347e35149e4c1fe7891a2128858e1"} Feb 23 09:17:34 crc kubenswrapper[4834]: I0223 09:17:34.923502 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceph" podStartSLOduration=1.4935659860000001 podStartE2EDuration="16.92348795s" podCreationTimestamp="2026-02-23 09:17:18 +0000 UTC" firstStartedPulling="2026-02-23 09:17:18.767003749 +0000 UTC m=+574.845318166" lastFinishedPulling="2026-02-23 09:17:34.196925703 +0000 UTC m=+590.275240130" observedRunningTime="2026-02-23 09:17:34.920456007 +0000 UTC 
m=+590.998770394" watchObservedRunningTime="2026-02-23 09:17:34.92348795 +0000 UTC m=+591.001802337" Feb 23 09:17:35 crc kubenswrapper[4834]: E0223 09:17:35.068295 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:35 crc kubenswrapper[4834]: E0223 09:17:35.084049 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:36 crc kubenswrapper[4834]: E0223 09:17:36.225850 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:36 crc kubenswrapper[4834]: E0223 09:17:36.236806 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:37 crc kubenswrapper[4834]: E0223 09:17:37.384991 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:37 crc kubenswrapper[4834]: E0223 09:17:37.405943 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:38 crc kubenswrapper[4834]: E0223 09:17:38.560708 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:38 crc kubenswrapper[4834]: E0223 09:17:38.578217 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:39 crc kubenswrapper[4834]: E0223 09:17:39.719802 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:39 crc kubenswrapper[4834]: E0223 09:17:39.738706 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:40 crc kubenswrapper[4834]: E0223 09:17:40.885576 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" 
Feb 23 09:17:40 crc kubenswrapper[4834]: E0223 09:17:40.898106 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:42 crc kubenswrapper[4834]: E0223 09:17:42.078954 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:42 crc kubenswrapper[4834]: E0223 09:17:42.096962 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:43 crc kubenswrapper[4834]: E0223 09:17:43.232872 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:43 crc kubenswrapper[4834]: E0223 09:17:43.256906 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:44 crc kubenswrapper[4834]: E0223 09:17:44.414708 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:44 crc kubenswrapper[4834]: E0223 09:17:44.430814 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:45 crc kubenswrapper[4834]: E0223 09:17:45.562733 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:45 crc kubenswrapper[4834]: E0223 09:17:45.574820 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:46 crc kubenswrapper[4834]: E0223 09:17:46.702934 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:46 crc kubenswrapper[4834]: E0223 09:17:46.716341 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:47 crc kubenswrapper[4834]: E0223 09:17:47.861860 4834 server.go:309] "Unable to 
authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:47 crc kubenswrapper[4834]: E0223 09:17:47.881317 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:49 crc kubenswrapper[4834]: E0223 09:17:49.028306 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:49 crc kubenswrapper[4834]: E0223 09:17:49.049913 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:50 crc kubenswrapper[4834]: E0223 09:17:50.244208 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:50 crc kubenswrapper[4834]: E0223 09:17:50.259868 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:51 crc kubenswrapper[4834]: E0223 09:17:51.401509 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:51 crc kubenswrapper[4834]: E0223 09:17:51.413895 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:52 crc kubenswrapper[4834]: E0223 09:17:52.544319 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:52 crc kubenswrapper[4834]: E0223 09:17:52.557811 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:53 crc kubenswrapper[4834]: E0223 09:17:53.704132 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:53 crc kubenswrapper[4834]: E0223 09:17:53.729063 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, 
AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:54 crc kubenswrapper[4834]: E0223 09:17:54.859917 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:54 crc kubenswrapper[4834]: E0223 09:17:54.873596 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:56 crc kubenswrapper[4834]: E0223 09:17:56.020920 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:56 crc kubenswrapper[4834]: E0223 09:17:56.036391 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:57 crc kubenswrapper[4834]: E0223 09:17:57.175030 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:57 crc kubenswrapper[4834]: E0223 09:17:57.195290 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:58 crc kubenswrapper[4834]: E0223 09:17:58.337670 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:58 crc kubenswrapper[4834]: E0223 09:17:58.356128 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:59 crc kubenswrapper[4834]: E0223 09:17:59.500342 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:17:59 crc kubenswrapper[4834]: E0223 09:17:59.513007 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:00 crc kubenswrapper[4834]: E0223 09:18:00.639694 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by 
unknown authority" Feb 23 09:18:00 crc kubenswrapper[4834]: E0223 09:18:00.652378 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:01 crc kubenswrapper[4834]: E0223 09:18:01.810551 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:01 crc kubenswrapper[4834]: E0223 09:18:01.825993 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:02 crc kubenswrapper[4834]: E0223 09:18:02.974073 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:02 crc kubenswrapper[4834]: E0223 09:18:02.998708 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:04 crc kubenswrapper[4834]: E0223 09:18:04.138621 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:04 crc kubenswrapper[4834]: E0223 09:18:04.152271 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:05 crc kubenswrapper[4834]: E0223 09:18:05.288483 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:05 crc kubenswrapper[4834]: E0223 09:18:05.301124 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:06 crc kubenswrapper[4834]: E0223 09:18:06.446748 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:06 crc kubenswrapper[4834]: E0223 09:18:06.461133 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:07 crc kubenswrapper[4834]: E0223 09:18:07.622632 4834 
server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:07 crc kubenswrapper[4834]: E0223 09:18:07.640913 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:08 crc kubenswrapper[4834]: E0223 09:18:08.795154 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:08 crc kubenswrapper[4834]: E0223 09:18:08.813459 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:09 crc kubenswrapper[4834]: E0223 09:18:09.956371 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:09 crc kubenswrapper[4834]: E0223 09:18:09.978371 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:11 crc kubenswrapper[4834]: E0223 09:18:11.128123 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:11 crc kubenswrapper[4834]: E0223 09:18:11.146163 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:12 crc kubenswrapper[4834]: E0223 09:18:12.290974 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:12 crc kubenswrapper[4834]: E0223 09:18:12.313631 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:13 crc kubenswrapper[4834]: E0223 09:18:13.440612 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:13 crc kubenswrapper[4834]: E0223 09:18:13.460014 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate 
SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:14 crc kubenswrapper[4834]: E0223 09:18:14.638494 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:14 crc kubenswrapper[4834]: E0223 09:18:14.656377 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:15 crc kubenswrapper[4834]: E0223 09:18:15.837933 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:15 crc kubenswrapper[4834]: E0223 09:18:15.857878 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:17 crc kubenswrapper[4834]: E0223 09:18:17.043366 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:17 crc kubenswrapper[4834]: E0223 09:18:17.056760 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:18 crc kubenswrapper[4834]: E0223 09:18:18.212175 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:18 crc kubenswrapper[4834]: E0223 09:18:18.228919 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:19 crc kubenswrapper[4834]: E0223 09:18:19.400762 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:19 crc kubenswrapper[4834]: E0223 09:18:19.421176 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:20 crc kubenswrapper[4834]: E0223 09:18:20.554892 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE 
failed: x509: certificate signed by unknown authority" Feb 23 09:18:20 crc kubenswrapper[4834]: E0223 09:18:20.568452 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:21 crc kubenswrapper[4834]: E0223 09:18:21.709448 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:21 crc kubenswrapper[4834]: E0223 09:18:21.722944 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:22 crc kubenswrapper[4834]: E0223 09:18:22.862162 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:22 crc kubenswrapper[4834]: E0223 09:18:22.877599 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:24 crc kubenswrapper[4834]: E0223 09:18:24.025635 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:24 crc kubenswrapper[4834]: E0223 09:18:24.047247 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:25 crc kubenswrapper[4834]: E0223 09:18:25.197019 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:25 crc kubenswrapper[4834]: E0223 09:18:25.210067 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:26 crc kubenswrapper[4834]: E0223 09:18:26.366552 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:26 crc kubenswrapper[4834]: E0223 09:18:26.388162 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:27 crc kubenswrapper[4834]: 
E0223 09:18:27.536631 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:27 crc kubenswrapper[4834]: E0223 09:18:27.558293 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:27 crc kubenswrapper[4834]: I0223 09:18:27.810503 4834 patch_prober.go:28] interesting pod/machine-config-daemon-kt9lp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 23 09:18:27 crc kubenswrapper[4834]: I0223 09:18:27.810603 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 23 09:18:28 crc kubenswrapper[4834]: E0223 09:18:28.713350 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:28 crc kubenswrapper[4834]: E0223 09:18:28.736699 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:29 crc kubenswrapper[4834]: E0223 09:18:29.878096 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:29 crc kubenswrapper[4834]: E0223 09:18:29.899027 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:31 crc kubenswrapper[4834]: E0223 09:18:31.032777 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:31 crc kubenswrapper[4834]: E0223 09:18:31.052149 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:32 crc kubenswrapper[4834]: E0223 09:18:32.189593 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:32 crc 
kubenswrapper[4834]: E0223 09:18:32.204368 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:33 crc kubenswrapper[4834]: E0223 09:18:33.342070 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:33 crc kubenswrapper[4834]: E0223 09:18:33.360468 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:34 crc kubenswrapper[4834]: E0223 09:18:34.515923 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:34 crc kubenswrapper[4834]: E0223 09:18:34.537791 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:35 crc kubenswrapper[4834]: E0223 09:18:35.697642 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:35 crc kubenswrapper[4834]: E0223 09:18:35.713163 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:36 crc kubenswrapper[4834]: E0223 09:18:36.846679 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:36 crc kubenswrapper[4834]: E0223 09:18:36.864638 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:38 crc kubenswrapper[4834]: E0223 09:18:38.030890 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:38 crc kubenswrapper[4834]: E0223 09:18:38.051952 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:39 crc kubenswrapper[4834]: E0223 09:18:39.187975 4834 server.go:309] "Unable to authenticate the request 
due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:39 crc kubenswrapper[4834]: E0223 09:18:39.208430 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:40 crc kubenswrapper[4834]: E0223 09:18:40.400562 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:40 crc kubenswrapper[4834]: E0223 09:18:40.421592 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:41 crc kubenswrapper[4834]: E0223 09:18:41.554840 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:41 crc kubenswrapper[4834]: E0223 09:18:41.573677 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:42 crc kubenswrapper[4834]: E0223 09:18:42.734843 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:42 crc kubenswrapper[4834]: E0223 09:18:42.749842 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:43 crc kubenswrapper[4834]: E0223 09:18:43.883522 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:43 crc kubenswrapper[4834]: E0223 09:18:43.899179 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:45 crc kubenswrapper[4834]: E0223 09:18:45.046697 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:45 crc kubenswrapper[4834]: E0223 09:18:45.066357 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, 
AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:46 crc kubenswrapper[4834]: E0223 09:18:46.223247 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:46 crc kubenswrapper[4834]: E0223 09:18:46.244507 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:47 crc kubenswrapper[4834]: E0223 09:18:47.395629 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:47 crc kubenswrapper[4834]: E0223 09:18:47.413974 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:48 crc kubenswrapper[4834]: E0223 09:18:48.537554 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:48 crc kubenswrapper[4834]: E0223 09:18:48.548404 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:49 crc kubenswrapper[4834]: E0223 09:18:49.714373 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:49 crc kubenswrapper[4834]: E0223 09:18:49.731541 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:50 crc kubenswrapper[4834]: E0223 09:18:50.903879 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:50 crc kubenswrapper[4834]: E0223 09:18:50.923712 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:52 crc kubenswrapper[4834]: E0223 09:18:52.061532 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by 
unknown authority" Feb 23 09:18:52 crc kubenswrapper[4834]: E0223 09:18:52.085753 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:53 crc kubenswrapper[4834]: E0223 09:18:53.261290 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:53 crc kubenswrapper[4834]: E0223 09:18:53.278886 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:54 crc kubenswrapper[4834]: E0223 09:18:54.476697 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:54 crc kubenswrapper[4834]: E0223 09:18:54.494662 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:55 crc kubenswrapper[4834]: E0223 09:18:55.644107 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:55 crc kubenswrapper[4834]: E0223 09:18:55.664386 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:56 crc kubenswrapper[4834]: E0223 09:18:56.797705 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:56 crc kubenswrapper[4834]: E0223 09:18:56.815841 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:57 crc kubenswrapper[4834]: I0223 09:18:57.810578 4834 patch_prober.go:28] interesting pod/machine-config-daemon-kt9lp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 23 09:18:57 crc kubenswrapper[4834]: I0223 09:18:57.810669 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" Feb 23 09:18:57 crc kubenswrapper[4834]: E0223 09:18:57.960290 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:57 crc kubenswrapper[4834]: E0223 09:18:57.975560 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:59 crc kubenswrapper[4834]: E0223 09:18:59.096999 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:18:59 crc kubenswrapper[4834]: E0223 09:18:59.112980 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:19:00 crc kubenswrapper[4834]: E0223 09:19:00.283565 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:19:00 crc kubenswrapper[4834]: E0223 09:19:00.303884 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:19:01 crc kubenswrapper[4834]: E0223 09:19:01.474775 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:19:01 crc kubenswrapper[4834]: E0223 09:19:01.496187 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:19:02 crc kubenswrapper[4834]: E0223 09:19:02.648885 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:19:02 crc kubenswrapper[4834]: E0223 09:19:02.668378 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:19:03 crc kubenswrapper[4834]: E0223 09:19:03.815526 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:19:03 crc kubenswrapper[4834]: E0223 
09:19:03.827778 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority"
Feb 23 09:19:23 crc kubenswrapper[4834]: E0223 09:19:23.530732 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority"
Feb 23 09:19:27 crc kubenswrapper[4834]: E0223 09:19:27.159818 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority"
Feb 23 09:19:27 crc kubenswrapper[4834]: I0223 09:19:27.810654 4834 patch_prober.go:28] interesting pod/machine-config-daemon-kt9lp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Feb 23 09:19:27 crc kubenswrapper[4834]: I0223 09:19:27.810710 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Feb 23 09:19:27 crc kubenswrapper[4834]: I0223 09:19:27.810752 4834 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp"
Feb 23 09:19:27 crc kubenswrapper[4834]: I0223 09:19:27.811256 4834 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8b47de6c39b49990870479ce2e9821e6b671e9454136c5cd3f39336a30c7e515"} pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
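
The liveness failure recorded above is an ordinary HTTP probe: the kubelet issues a GET against the container's health endpoint and treats a connection refusal, or a status outside 200-399, as unhealthy, which is what triggers the restart in the entries that follow. A minimal stand-in for that check, not the kubelet's implementation: the endpoint is taken from the log, while the timeout and success window here are illustrative assumptions rather than the machine-config-daemon's actual probe settings.

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: time.Second}
	resp, err := client.Get("http://127.0.0.1:8798/health") // endpoint as reported in the probe failure above
	if err != nil {
		// A dead or restarting container surfaces here, e.g.
		// "dial tcp 127.0.0.1:8798: connect: connection refused".
		fmt.Println("probe failure:", err)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 200 && resp.StatusCode < 400 {
		fmt.Println("probe success:", resp.Status)
	} else {
		fmt.Println("probe failure:", resp.Status)
	}
}
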
Feb 23 09:19:27 crc kubenswrapper[4834]: I0223 09:19:27.811315 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" containerID="cri-o://8b47de6c39b49990870479ce2e9821e6b671e9454136c5cd3f39336a30c7e515" gracePeriod=600
Feb 23 09:19:28 crc kubenswrapper[4834]: E0223 09:19:28.306986 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority"
Feb 23 09:19:28 crc kubenswrapper[4834]: I0223 09:19:28.605992 4834 generic.go:334] "Generic (PLEG): container finished" podID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerID="8b47de6c39b49990870479ce2e9821e6b671e9454136c5cd3f39336a30c7e515" exitCode=0
Feb 23 09:19:28 crc kubenswrapper[4834]: I0223 09:19:28.606058 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" event={"ID":"1172b9a5-71ca-49e9-a033-3b59c9c024a4","Type":"ContainerDied","Data":"8b47de6c39b49990870479ce2e9821e6b671e9454136c5cd3f39336a30c7e515"}
Feb 23 09:19:28 crc kubenswrapper[4834]: I0223 09:19:28.606102 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" event={"ID":"1172b9a5-71ca-49e9-a033-3b59c9c024a4","Type":"ContainerStarted","Data":"d30a3a99709256ecf0353a89cecf613454777e4a41fa8a0ad21693babdbd0aa0"}
Feb 23 09:19:28 crc kubenswrapper[4834]: I0223 09:19:28.606129 4834 scope.go:117] "RemoveContainer" containerID="88a28e626207b9996f70605163951b8b51203e8d24ce2c7e8948a63be9984191"
Feb 23 09:19:31 crc kubenswrapper[4834]: E0223 09:19:31.896790 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority"
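
The recurring server.go:309 errors in this stretch come from the kubelet's request-authentication path: the presented client certificate (SN=4336253678461163118) cannot be chained to any CA in the kubelet's client CA bundle. The run tails off at 09:19:37, and a new client-ca-bundle is loaded at 09:19:58 just below. A sketch of a check of this shape using Go's crypto/x509, not kubelet code: the bundle path is the one named in the reload message, and "client.crt" is a hypothetical file standing in for the certificate the client presented.

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
)

func main() {
	caPEM, err := os.ReadFile("/etc/kubernetes/kubelet-ca.crt")
	if err != nil {
		panic(err)
	}
	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(caPEM) {
		panic("no CA certificates found in bundle")
	}

	clientPEM, err := os.ReadFile("client.crt") // hypothetical copy of the presented client certificate
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(clientPEM)
	if block == nil {
		panic("client.crt does not contain a PEM block")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}

	// When the certificate's signing CA is absent from the bundle, Verify
	// returns x509.UnknownAuthorityError, whose message is the
	// "x509: certificate signed by unknown authority" text seen in the log.
	_, err = cert.Verify(x509.VerifyOptions{
		Roots:     roots,
		KeyUsages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
	})
	fmt.Println("verify result:", err)
}
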
Feb 23 09:19:37 crc kubenswrapper[4834]: E0223 09:19:37.693515 4834 server.go:309] "Unable to authenticate the request due to an error" err="verifying certificate SN=4336253678461163118, SKID=, 
AKID=1B:D2:12:C3:90:84:22:56:C5:D4:AE:2A:EE:5C:8A:78:83:BF:EB:FE failed: x509: certificate signed by unknown authority" Feb 23 09:19:58 crc kubenswrapper[4834]: I0223 09:19:58.675609 4834 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Feb 23 09:20:08 crc kubenswrapper[4834]: I0223 09:20:08.957497 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-wl2ws/must-gather-p9n75"] Feb 23 09:20:08 crc kubenswrapper[4834]: I0223 09:20:08.958717 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-wl2ws/must-gather-p9n75" Feb 23 09:20:08 crc kubenswrapper[4834]: I0223 09:20:08.961476 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-wl2ws"/"openshift-service-ca.crt" Feb 23 09:20:08 crc kubenswrapper[4834]: I0223 09:20:08.961562 4834 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-wl2ws"/"kube-root-ca.crt" Feb 23 09:20:08 crc kubenswrapper[4834]: I0223 09:20:08.968915 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-wl2ws/must-gather-p9n75"] Feb 23 09:20:09 crc kubenswrapper[4834]: I0223 09:20:09.080862 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/36931913-6ce1-4442-9640-194bcf85a32c-must-gather-output\") pod \"must-gather-p9n75\" (UID: \"36931913-6ce1-4442-9640-194bcf85a32c\") " pod="openshift-must-gather-wl2ws/must-gather-p9n75" Feb 23 09:20:09 crc kubenswrapper[4834]: I0223 09:20:09.080954 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zk5n8\" (UniqueName: \"kubernetes.io/projected/36931913-6ce1-4442-9640-194bcf85a32c-kube-api-access-zk5n8\") pod \"must-gather-p9n75\" (UID: \"36931913-6ce1-4442-9640-194bcf85a32c\") " pod="openshift-must-gather-wl2ws/must-gather-p9n75" Feb 23 09:20:09 crc kubenswrapper[4834]: I0223 09:20:09.182649 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zk5n8\" (UniqueName: \"kubernetes.io/projected/36931913-6ce1-4442-9640-194bcf85a32c-kube-api-access-zk5n8\") pod \"must-gather-p9n75\" (UID: \"36931913-6ce1-4442-9640-194bcf85a32c\") " pod="openshift-must-gather-wl2ws/must-gather-p9n75" Feb 23 09:20:09 crc kubenswrapper[4834]: I0223 09:20:09.182735 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/36931913-6ce1-4442-9640-194bcf85a32c-must-gather-output\") pod \"must-gather-p9n75\" (UID: \"36931913-6ce1-4442-9640-194bcf85a32c\") " pod="openshift-must-gather-wl2ws/must-gather-p9n75" Feb 23 09:20:09 crc kubenswrapper[4834]: I0223 09:20:09.183150 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/36931913-6ce1-4442-9640-194bcf85a32c-must-gather-output\") pod \"must-gather-p9n75\" (UID: \"36931913-6ce1-4442-9640-194bcf85a32c\") " pod="openshift-must-gather-wl2ws/must-gather-p9n75" Feb 23 09:20:09 crc kubenswrapper[4834]: I0223 09:20:09.198264 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zk5n8\" (UniqueName: \"kubernetes.io/projected/36931913-6ce1-4442-9640-194bcf85a32c-kube-api-access-zk5n8\") pod \"must-gather-p9n75\" (UID: 
\"36931913-6ce1-4442-9640-194bcf85a32c\") " pod="openshift-must-gather-wl2ws/must-gather-p9n75" Feb 23 09:20:09 crc kubenswrapper[4834]: I0223 09:20:09.273134 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-wl2ws/must-gather-p9n75" Feb 23 09:20:09 crc kubenswrapper[4834]: I0223 09:20:09.482989 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-wl2ws/must-gather-p9n75"] Feb 23 09:20:09 crc kubenswrapper[4834]: I0223 09:20:09.856766 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-wl2ws/must-gather-p9n75" event={"ID":"36931913-6ce1-4442-9640-194bcf85a32c","Type":"ContainerStarted","Data":"b7bd8b2369a836133ae10e802d70f14718144d4a562bf74bc4ead55b4265d528"} Feb 23 09:20:14 crc kubenswrapper[4834]: I0223 09:20:14.889846 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-wl2ws/must-gather-p9n75" event={"ID":"36931913-6ce1-4442-9640-194bcf85a32c","Type":"ContainerStarted","Data":"f07f53cddcea40aa352433e38b2f146d3bf02d5c2167d73c49d1b93414e7cc17"} Feb 23 09:20:14 crc kubenswrapper[4834]: I0223 09:20:14.890496 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-wl2ws/must-gather-p9n75" event={"ID":"36931913-6ce1-4442-9640-194bcf85a32c","Type":"ContainerStarted","Data":"55bb2427e85aa0de31fe2d5f45aba723f981f78f6756c29fbec8f903401fd241"} Feb 23 09:20:14 crc kubenswrapper[4834]: I0223 09:20:14.914732 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-wl2ws/must-gather-p9n75" podStartSLOduration=1.9957908149999999 podStartE2EDuration="6.914711458s" podCreationTimestamp="2026-02-23 09:20:08 +0000 UTC" firstStartedPulling="2026-02-23 09:20:09.490424313 +0000 UTC m=+745.568738700" lastFinishedPulling="2026-02-23 09:20:14.409344956 +0000 UTC m=+750.487659343" observedRunningTime="2026-02-23 09:20:14.908813455 +0000 UTC m=+750.987127922" watchObservedRunningTime="2026-02-23 09:20:14.914711458 +0000 UTC m=+750.993025925" Feb 23 09:20:15 crc kubenswrapper[4834]: I0223 09:20:15.692610 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2p2q9"] Feb 23 09:20:15 crc kubenswrapper[4834]: I0223 09:20:15.693730 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:15 crc kubenswrapper[4834]: I0223 09:20:15.702418 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2p2q9"] Feb 23 09:20:15 crc kubenswrapper[4834]: I0223 09:20:15.872292 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd11248f-d62a-4a4d-8627-1f17058bd6be-utilities\") pod \"redhat-marketplace-2p2q9\" (UID: \"bd11248f-d62a-4a4d-8627-1f17058bd6be\") " pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:15 crc kubenswrapper[4834]: I0223 09:20:15.872612 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c78j\" (UniqueName: \"kubernetes.io/projected/bd11248f-d62a-4a4d-8627-1f17058bd6be-kube-api-access-2c78j\") pod \"redhat-marketplace-2p2q9\" (UID: \"bd11248f-d62a-4a4d-8627-1f17058bd6be\") " pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:15 crc kubenswrapper[4834]: I0223 09:20:15.872648 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd11248f-d62a-4a4d-8627-1f17058bd6be-catalog-content\") pod \"redhat-marketplace-2p2q9\" (UID: \"bd11248f-d62a-4a4d-8627-1f17058bd6be\") " pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:15 crc kubenswrapper[4834]: I0223 09:20:15.973650 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd11248f-d62a-4a4d-8627-1f17058bd6be-utilities\") pod \"redhat-marketplace-2p2q9\" (UID: \"bd11248f-d62a-4a4d-8627-1f17058bd6be\") " pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:15 crc kubenswrapper[4834]: I0223 09:20:15.973699 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c78j\" (UniqueName: \"kubernetes.io/projected/bd11248f-d62a-4a4d-8627-1f17058bd6be-kube-api-access-2c78j\") pod \"redhat-marketplace-2p2q9\" (UID: \"bd11248f-d62a-4a4d-8627-1f17058bd6be\") " pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:15 crc kubenswrapper[4834]: I0223 09:20:15.974000 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd11248f-d62a-4a4d-8627-1f17058bd6be-catalog-content\") pod \"redhat-marketplace-2p2q9\" (UID: \"bd11248f-d62a-4a4d-8627-1f17058bd6be\") " pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:15 crc kubenswrapper[4834]: I0223 09:20:15.974298 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd11248f-d62a-4a4d-8627-1f17058bd6be-catalog-content\") pod \"redhat-marketplace-2p2q9\" (UID: \"bd11248f-d62a-4a4d-8627-1f17058bd6be\") " pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:15 crc kubenswrapper[4834]: I0223 09:20:15.974323 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd11248f-d62a-4a4d-8627-1f17058bd6be-utilities\") pod \"redhat-marketplace-2p2q9\" (UID: \"bd11248f-d62a-4a4d-8627-1f17058bd6be\") " pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:15 crc kubenswrapper[4834]: I0223 09:20:15.995821 4834 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-2c78j\" (UniqueName: \"kubernetes.io/projected/bd11248f-d62a-4a4d-8627-1f17058bd6be-kube-api-access-2c78j\") pod \"redhat-marketplace-2p2q9\" (UID: \"bd11248f-d62a-4a4d-8627-1f17058bd6be\") " pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:16 crc kubenswrapper[4834]: I0223 09:20:16.009608 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:16 crc kubenswrapper[4834]: I0223 09:20:16.265787 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2p2q9"] Feb 23 09:20:16 crc kubenswrapper[4834]: I0223 09:20:16.903802 4834 generic.go:334] "Generic (PLEG): container finished" podID="bd11248f-d62a-4a4d-8627-1f17058bd6be" containerID="7e6a872345306809ff8c86ae2ce75598e1ae6dba9e1348d40a7e35718652bbc6" exitCode=0 Feb 23 09:20:16 crc kubenswrapper[4834]: I0223 09:20:16.904318 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2p2q9" event={"ID":"bd11248f-d62a-4a4d-8627-1f17058bd6be","Type":"ContainerDied","Data":"7e6a872345306809ff8c86ae2ce75598e1ae6dba9e1348d40a7e35718652bbc6"} Feb 23 09:20:16 crc kubenswrapper[4834]: I0223 09:20:16.904369 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2p2q9" event={"ID":"bd11248f-d62a-4a4d-8627-1f17058bd6be","Type":"ContainerStarted","Data":"48432aa55791a6a5c3900d6374e0c666a8bbccc33c8275a1ae6f171abad3e2b1"} Feb 23 09:20:17 crc kubenswrapper[4834]: I0223 09:20:17.910354 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2p2q9" event={"ID":"bd11248f-d62a-4a4d-8627-1f17058bd6be","Type":"ContainerStarted","Data":"6e47f7764c000a8dbf32b872205b7f1fde35b4cfb05cacb3f7b00e26cba12168"} Feb 23 09:20:18 crc kubenswrapper[4834]: I0223 09:20:18.920159 4834 generic.go:334] "Generic (PLEG): container finished" podID="bd11248f-d62a-4a4d-8627-1f17058bd6be" containerID="6e47f7764c000a8dbf32b872205b7f1fde35b4cfb05cacb3f7b00e26cba12168" exitCode=0 Feb 23 09:20:18 crc kubenswrapper[4834]: I0223 09:20:18.920214 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2p2q9" event={"ID":"bd11248f-d62a-4a4d-8627-1f17058bd6be","Type":"ContainerDied","Data":"6e47f7764c000a8dbf32b872205b7f1fde35b4cfb05cacb3f7b00e26cba12168"} Feb 23 09:20:19 crc kubenswrapper[4834]: I0223 09:20:19.931576 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2p2q9" event={"ID":"bd11248f-d62a-4a4d-8627-1f17058bd6be","Type":"ContainerStarted","Data":"3e9a39c18847d0506aba1be2afb84d7135dbafa3442fd0e64ff48188368fffd9"} Feb 23 09:20:19 crc kubenswrapper[4834]: I0223 09:20:19.957450 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2p2q9" podStartSLOduration=2.37385308 podStartE2EDuration="4.957425885s" podCreationTimestamp="2026-02-23 09:20:15 +0000 UTC" firstStartedPulling="2026-02-23 09:20:16.905853529 +0000 UTC m=+752.984167916" lastFinishedPulling="2026-02-23 09:20:19.489426314 +0000 UTC m=+755.567740721" observedRunningTime="2026-02-23 09:20:19.950746071 +0000 UTC m=+756.029060498" watchObservedRunningTime="2026-02-23 09:20:19.957425885 +0000 UTC m=+756.035740312" Feb 23 09:20:26 crc kubenswrapper[4834]: I0223 09:20:26.011209 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:26 crc kubenswrapper[4834]: I0223 09:20:26.011546 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:26 crc kubenswrapper[4834]: I0223 09:20:26.051791 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:27 crc kubenswrapper[4834]: I0223 09:20:27.020757 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:27 crc kubenswrapper[4834]: I0223 09:20:27.064730 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2p2q9"] Feb 23 09:20:28 crc kubenswrapper[4834]: I0223 09:20:28.734698 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceph_034328e2-f765-41c2-bf18-c25cde36414e/ceph/0.log" Feb 23 09:20:28 crc kubenswrapper[4834]: I0223 09:20:28.988632 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2p2q9" podUID="bd11248f-d62a-4a4d-8627-1f17058bd6be" containerName="registry-server" containerID="cri-o://3e9a39c18847d0506aba1be2afb84d7135dbafa3442fd0e64ff48188368fffd9" gracePeriod=2 Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.324786 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.445334 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd11248f-d62a-4a4d-8627-1f17058bd6be-catalog-content\") pod \"bd11248f-d62a-4a4d-8627-1f17058bd6be\" (UID: \"bd11248f-d62a-4a4d-8627-1f17058bd6be\") " Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.445517 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2c78j\" (UniqueName: \"kubernetes.io/projected/bd11248f-d62a-4a4d-8627-1f17058bd6be-kube-api-access-2c78j\") pod \"bd11248f-d62a-4a4d-8627-1f17058bd6be\" (UID: \"bd11248f-d62a-4a4d-8627-1f17058bd6be\") " Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.445553 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd11248f-d62a-4a4d-8627-1f17058bd6be-utilities\") pod \"bd11248f-d62a-4a4d-8627-1f17058bd6be\" (UID: \"bd11248f-d62a-4a4d-8627-1f17058bd6be\") " Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.446864 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd11248f-d62a-4a4d-8627-1f17058bd6be-utilities" (OuterVolumeSpecName: "utilities") pod "bd11248f-d62a-4a4d-8627-1f17058bd6be" (UID: "bd11248f-d62a-4a4d-8627-1f17058bd6be"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.459596 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd11248f-d62a-4a4d-8627-1f17058bd6be-kube-api-access-2c78j" (OuterVolumeSpecName: "kube-api-access-2c78j") pod "bd11248f-d62a-4a4d-8627-1f17058bd6be" (UID: "bd11248f-d62a-4a4d-8627-1f17058bd6be"). InnerVolumeSpecName "kube-api-access-2c78j". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.471001 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd11248f-d62a-4a4d-8627-1f17058bd6be-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bd11248f-d62a-4a4d-8627-1f17058bd6be" (UID: "bd11248f-d62a-4a4d-8627-1f17058bd6be"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.547149 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2c78j\" (UniqueName: \"kubernetes.io/projected/bd11248f-d62a-4a4d-8627-1f17058bd6be-kube-api-access-2c78j\") on node \"crc\" DevicePath \"\"" Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.547209 4834 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bd11248f-d62a-4a4d-8627-1f17058bd6be-utilities\") on node \"crc\" DevicePath \"\"" Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.547228 4834 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bd11248f-d62a-4a4d-8627-1f17058bd6be-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.997082 4834 generic.go:334] "Generic (PLEG): container finished" podID="bd11248f-d62a-4a4d-8627-1f17058bd6be" containerID="3e9a39c18847d0506aba1be2afb84d7135dbafa3442fd0e64ff48188368fffd9" exitCode=0 Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.997127 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2p2q9" event={"ID":"bd11248f-d62a-4a4d-8627-1f17058bd6be","Type":"ContainerDied","Data":"3e9a39c18847d0506aba1be2afb84d7135dbafa3442fd0e64ff48188368fffd9"} Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.997232 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2p2q9" event={"ID":"bd11248f-d62a-4a4d-8627-1f17058bd6be","Type":"ContainerDied","Data":"48432aa55791a6a5c3900d6374e0c666a8bbccc33c8275a1ae6f171abad3e2b1"} Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.997233 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2p2q9" Feb 23 09:20:29 crc kubenswrapper[4834]: I0223 09:20:29.997261 4834 scope.go:117] "RemoveContainer" containerID="3e9a39c18847d0506aba1be2afb84d7135dbafa3442fd0e64ff48188368fffd9" Feb 23 09:20:30 crc kubenswrapper[4834]: I0223 09:20:30.020834 4834 scope.go:117] "RemoveContainer" containerID="6e47f7764c000a8dbf32b872205b7f1fde35b4cfb05cacb3f7b00e26cba12168" Feb 23 09:20:30 crc kubenswrapper[4834]: I0223 09:20:30.047694 4834 scope.go:117] "RemoveContainer" containerID="7e6a872345306809ff8c86ae2ce75598e1ae6dba9e1348d40a7e35718652bbc6" Feb 23 09:20:30 crc kubenswrapper[4834]: I0223 09:20:30.056092 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2p2q9"] Feb 23 09:20:30 crc kubenswrapper[4834]: I0223 09:20:30.062045 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2p2q9"] Feb 23 09:20:30 crc kubenswrapper[4834]: I0223 09:20:30.082895 4834 scope.go:117] "RemoveContainer" containerID="3e9a39c18847d0506aba1be2afb84d7135dbafa3442fd0e64ff48188368fffd9" Feb 23 09:20:30 crc kubenswrapper[4834]: E0223 09:20:30.083440 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3e9a39c18847d0506aba1be2afb84d7135dbafa3442fd0e64ff48188368fffd9\": container with ID starting with 3e9a39c18847d0506aba1be2afb84d7135dbafa3442fd0e64ff48188368fffd9 not found: ID does not exist" containerID="3e9a39c18847d0506aba1be2afb84d7135dbafa3442fd0e64ff48188368fffd9" Feb 23 09:20:30 crc kubenswrapper[4834]: I0223 09:20:30.083484 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3e9a39c18847d0506aba1be2afb84d7135dbafa3442fd0e64ff48188368fffd9"} err="failed to get container status \"3e9a39c18847d0506aba1be2afb84d7135dbafa3442fd0e64ff48188368fffd9\": rpc error: code = NotFound desc = could not find container \"3e9a39c18847d0506aba1be2afb84d7135dbafa3442fd0e64ff48188368fffd9\": container with ID starting with 3e9a39c18847d0506aba1be2afb84d7135dbafa3442fd0e64ff48188368fffd9 not found: ID does not exist" Feb 23 09:20:30 crc kubenswrapper[4834]: I0223 09:20:30.083512 4834 scope.go:117] "RemoveContainer" containerID="6e47f7764c000a8dbf32b872205b7f1fde35b4cfb05cacb3f7b00e26cba12168" Feb 23 09:20:30 crc kubenswrapper[4834]: E0223 09:20:30.085118 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e47f7764c000a8dbf32b872205b7f1fde35b4cfb05cacb3f7b00e26cba12168\": container with ID starting with 6e47f7764c000a8dbf32b872205b7f1fde35b4cfb05cacb3f7b00e26cba12168 not found: ID does not exist" containerID="6e47f7764c000a8dbf32b872205b7f1fde35b4cfb05cacb3f7b00e26cba12168" Feb 23 09:20:30 crc kubenswrapper[4834]: I0223 09:20:30.085157 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e47f7764c000a8dbf32b872205b7f1fde35b4cfb05cacb3f7b00e26cba12168"} err="failed to get container status \"6e47f7764c000a8dbf32b872205b7f1fde35b4cfb05cacb3f7b00e26cba12168\": rpc error: code = NotFound desc = could not find container \"6e47f7764c000a8dbf32b872205b7f1fde35b4cfb05cacb3f7b00e26cba12168\": container with ID starting with 6e47f7764c000a8dbf32b872205b7f1fde35b4cfb05cacb3f7b00e26cba12168 not found: ID does not exist" Feb 23 09:20:30 crc kubenswrapper[4834]: I0223 09:20:30.085192 4834 scope.go:117] "RemoveContainer" 
containerID="7e6a872345306809ff8c86ae2ce75598e1ae6dba9e1348d40a7e35718652bbc6" Feb 23 09:20:30 crc kubenswrapper[4834]: E0223 09:20:30.085599 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e6a872345306809ff8c86ae2ce75598e1ae6dba9e1348d40a7e35718652bbc6\": container with ID starting with 7e6a872345306809ff8c86ae2ce75598e1ae6dba9e1348d40a7e35718652bbc6 not found: ID does not exist" containerID="7e6a872345306809ff8c86ae2ce75598e1ae6dba9e1348d40a7e35718652bbc6" Feb 23 09:20:30 crc kubenswrapper[4834]: I0223 09:20:30.085626 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e6a872345306809ff8c86ae2ce75598e1ae6dba9e1348d40a7e35718652bbc6"} err="failed to get container status \"7e6a872345306809ff8c86ae2ce75598e1ae6dba9e1348d40a7e35718652bbc6\": rpc error: code = NotFound desc = could not find container \"7e6a872345306809ff8c86ae2ce75598e1ae6dba9e1348d40a7e35718652bbc6\": container with ID starting with 7e6a872345306809ff8c86ae2ce75598e1ae6dba9e1348d40a7e35718652bbc6 not found: ID does not exist" Feb 23 09:20:30 crc kubenswrapper[4834]: I0223 09:20:30.597858 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd11248f-d62a-4a4d-8627-1f17058bd6be" path="/var/lib/kubelet/pods/bd11248f-d62a-4a4d-8627-1f17058bd6be/volumes" Feb 23 09:20:53 crc kubenswrapper[4834]: I0223 09:20:53.215489 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-k9lbb_c76b76a1-92df-4e16-b72b-ae9f3d952c72/control-plane-machine-set-operator/0.log" Feb 23 09:20:53 crc kubenswrapper[4834]: I0223 09:20:53.370600 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-bjngd_0f47419a-2e53-440b-854b-9fd226fb17d2/kube-rbac-proxy/0.log" Feb 23 09:20:53 crc kubenswrapper[4834]: I0223 09:20:53.393479 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-bjngd_0f47419a-2e53-440b-854b-9fd226fb17d2/machine-api-operator/0.log" Feb 23 09:21:06 crc kubenswrapper[4834]: I0223 09:21:06.017752 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-858654f9db-ls5wx_f1c43b16-4505-4745-8dd1-d3ccd4568121/cert-manager-controller/0.log" Feb 23 09:21:06 crc kubenswrapper[4834]: I0223 09:21:06.133197 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-cf98fcc89-r9vjb_cfdb4ce6-358f-4dd9-91e3-d322429bb391/cert-manager-cainjector/0.log" Feb 23 09:21:06 crc kubenswrapper[4834]: I0223 09:21:06.189778 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-687f57d79b-bftlt_14ec7de9-21a8-47f6-9ab9-bbce69d94ef8/cert-manager-webhook/0.log" Feb 23 09:21:32 crc kubenswrapper[4834]: I0223 09:21:32.468801 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lrv4x_6e4ac398-147a-4179-b898-bf5e8df2e333/extract-utilities/0.log" Feb 23 09:21:32 crc kubenswrapper[4834]: I0223 09:21:32.624150 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lrv4x_6e4ac398-147a-4179-b898-bf5e8df2e333/extract-content/0.log" Feb 23 09:21:32 crc kubenswrapper[4834]: I0223 09:21:32.641343 4834 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_certified-operators-lrv4x_6e4ac398-147a-4179-b898-bf5e8df2e333/extract-utilities/0.log" Feb 23 09:21:32 crc kubenswrapper[4834]: I0223 09:21:32.645147 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lrv4x_6e4ac398-147a-4179-b898-bf5e8df2e333/extract-content/0.log" Feb 23 09:21:32 crc kubenswrapper[4834]: I0223 09:21:32.807234 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lrv4x_6e4ac398-147a-4179-b898-bf5e8df2e333/extract-content/0.log" Feb 23 09:21:32 crc kubenswrapper[4834]: I0223 09:21:32.817926 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lrv4x_6e4ac398-147a-4179-b898-bf5e8df2e333/extract-utilities/0.log" Feb 23 09:21:32 crc kubenswrapper[4834]: I0223 09:21:32.944129 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6kd6m_b775dec4-0c14-4ea4-b513-01fee306aa41/extract-utilities/0.log" Feb 23 09:21:32 crc kubenswrapper[4834]: I0223 09:21:32.944369 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-lrv4x_6e4ac398-147a-4179-b898-bf5e8df2e333/registry-server/0.log" Feb 23 09:21:33 crc kubenswrapper[4834]: I0223 09:21:33.107015 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6kd6m_b775dec4-0c14-4ea4-b513-01fee306aa41/extract-content/0.log" Feb 23 09:21:33 crc kubenswrapper[4834]: I0223 09:21:33.121159 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6kd6m_b775dec4-0c14-4ea4-b513-01fee306aa41/extract-content/0.log" Feb 23 09:21:33 crc kubenswrapper[4834]: I0223 09:21:33.135855 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6kd6m_b775dec4-0c14-4ea4-b513-01fee306aa41/extract-utilities/0.log" Feb 23 09:21:33 crc kubenswrapper[4834]: I0223 09:21:33.277921 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6kd6m_b775dec4-0c14-4ea4-b513-01fee306aa41/extract-content/0.log" Feb 23 09:21:33 crc kubenswrapper[4834]: I0223 09:21:33.301852 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6kd6m_b775dec4-0c14-4ea4-b513-01fee306aa41/extract-utilities/0.log" Feb 23 09:21:33 crc kubenswrapper[4834]: I0223 09:21:33.362953 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-6kd6m_b775dec4-0c14-4ea4-b513-01fee306aa41/registry-server/0.log" Feb 23 09:21:33 crc kubenswrapper[4834]: I0223 09:21:33.469091 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-5954q_5bb580b6-b9ee-4c5c-9154-f6b58eac9ad8/marketplace-operator/0.log" Feb 23 09:21:33 crc kubenswrapper[4834]: I0223 09:21:33.533123 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-b2sb8_6f51e05f-82f7-4011-8801-0d387547ba12/extract-utilities/0.log" Feb 23 09:21:33 crc kubenswrapper[4834]: I0223 09:21:33.664377 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-b2sb8_6f51e05f-82f7-4011-8801-0d387547ba12/extract-content/0.log" Feb 23 09:21:33 crc kubenswrapper[4834]: I0223 09:21:33.671333 4834 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-b2sb8_6f51e05f-82f7-4011-8801-0d387547ba12/extract-utilities/0.log" Feb 23 09:21:33 crc kubenswrapper[4834]: I0223 09:21:33.699432 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-b2sb8_6f51e05f-82f7-4011-8801-0d387547ba12/extract-content/0.log" Feb 23 09:21:33 crc kubenswrapper[4834]: I0223 09:21:33.888766 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-b2sb8_6f51e05f-82f7-4011-8801-0d387547ba12/extract-content/0.log" Feb 23 09:21:33 crc kubenswrapper[4834]: I0223 09:21:33.894657 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-b2sb8_6f51e05f-82f7-4011-8801-0d387547ba12/extract-utilities/0.log" Feb 23 09:21:33 crc kubenswrapper[4834]: I0223 09:21:33.899747 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-b2sb8_6f51e05f-82f7-4011-8801-0d387547ba12/registry-server/0.log" Feb 23 09:21:34 crc kubenswrapper[4834]: I0223 09:21:34.029713 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hzj74_98fa75d2-5267-47d5-9640-881c6f8ce155/extract-utilities/0.log" Feb 23 09:21:34 crc kubenswrapper[4834]: I0223 09:21:34.203278 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hzj74_98fa75d2-5267-47d5-9640-881c6f8ce155/extract-content/0.log" Feb 23 09:21:34 crc kubenswrapper[4834]: I0223 09:21:34.208701 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hzj74_98fa75d2-5267-47d5-9640-881c6f8ce155/extract-utilities/0.log" Feb 23 09:21:34 crc kubenswrapper[4834]: I0223 09:21:34.212745 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hzj74_98fa75d2-5267-47d5-9640-881c6f8ce155/extract-content/0.log" Feb 23 09:21:34 crc kubenswrapper[4834]: I0223 09:21:34.329427 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hzj74_98fa75d2-5267-47d5-9640-881c6f8ce155/extract-utilities/0.log" Feb 23 09:21:34 crc kubenswrapper[4834]: I0223 09:21:34.347732 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hzj74_98fa75d2-5267-47d5-9640-881c6f8ce155/extract-content/0.log" Feb 23 09:21:34 crc kubenswrapper[4834]: I0223 09:21:34.389553 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-hzj74_98fa75d2-5267-47d5-9640-881c6f8ce155/registry-server/0.log" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.439748 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wqxsz"] Feb 23 09:21:35 crc kubenswrapper[4834]: E0223 09:21:35.440390 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd11248f-d62a-4a4d-8627-1f17058bd6be" containerName="registry-server" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.440442 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd11248f-d62a-4a4d-8627-1f17058bd6be" containerName="registry-server" Feb 23 09:21:35 crc kubenswrapper[4834]: E0223 09:21:35.440465 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd11248f-d62a-4a4d-8627-1f17058bd6be" containerName="extract-content" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.440478 4834 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="bd11248f-d62a-4a4d-8627-1f17058bd6be" containerName="extract-content" Feb 23 09:21:35 crc kubenswrapper[4834]: E0223 09:21:35.440513 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd11248f-d62a-4a4d-8627-1f17058bd6be" containerName="extract-utilities" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.440527 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd11248f-d62a-4a4d-8627-1f17058bd6be" containerName="extract-utilities" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.440701 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd11248f-d62a-4a4d-8627-1f17058bd6be" containerName="registry-server" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.442093 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.450619 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wqxsz"] Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.533657 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/265ef484-8fd2-455b-a69a-6b195a32a690-utilities\") pod \"certified-operators-wqxsz\" (UID: \"265ef484-8fd2-455b-a69a-6b195a32a690\") " pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.533725 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/265ef484-8fd2-455b-a69a-6b195a32a690-catalog-content\") pod \"certified-operators-wqxsz\" (UID: \"265ef484-8fd2-455b-a69a-6b195a32a690\") " pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.533770 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfz26\" (UniqueName: \"kubernetes.io/projected/265ef484-8fd2-455b-a69a-6b195a32a690-kube-api-access-hfz26\") pod \"certified-operators-wqxsz\" (UID: \"265ef484-8fd2-455b-a69a-6b195a32a690\") " pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.634677 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/265ef484-8fd2-455b-a69a-6b195a32a690-utilities\") pod \"certified-operators-wqxsz\" (UID: \"265ef484-8fd2-455b-a69a-6b195a32a690\") " pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.634749 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/265ef484-8fd2-455b-a69a-6b195a32a690-catalog-content\") pod \"certified-operators-wqxsz\" (UID: \"265ef484-8fd2-455b-a69a-6b195a32a690\") " pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.634777 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfz26\" (UniqueName: \"kubernetes.io/projected/265ef484-8fd2-455b-a69a-6b195a32a690-kube-api-access-hfz26\") pod \"certified-operators-wqxsz\" (UID: \"265ef484-8fd2-455b-a69a-6b195a32a690\") " pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:35 crc 
kubenswrapper[4834]: I0223 09:21:35.635523 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/265ef484-8fd2-455b-a69a-6b195a32a690-catalog-content\") pod \"certified-operators-wqxsz\" (UID: \"265ef484-8fd2-455b-a69a-6b195a32a690\") " pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.635583 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/265ef484-8fd2-455b-a69a-6b195a32a690-utilities\") pod \"certified-operators-wqxsz\" (UID: \"265ef484-8fd2-455b-a69a-6b195a32a690\") " pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.655257 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfz26\" (UniqueName: \"kubernetes.io/projected/265ef484-8fd2-455b-a69a-6b195a32a690-kube-api-access-hfz26\") pod \"certified-operators-wqxsz\" (UID: \"265ef484-8fd2-455b-a69a-6b195a32a690\") " pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:35 crc kubenswrapper[4834]: I0223 09:21:35.765893 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:36 crc kubenswrapper[4834]: I0223 09:21:36.200823 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wqxsz"] Feb 23 09:21:37 crc kubenswrapper[4834]: I0223 09:21:37.173877 4834 generic.go:334] "Generic (PLEG): container finished" podID="265ef484-8fd2-455b-a69a-6b195a32a690" containerID="8b70396d6aefe1774bf1fcd7194a68c00c786bb9b961fa1fa99807fcafeaa7a9" exitCode=0 Feb 23 09:21:37 crc kubenswrapper[4834]: I0223 09:21:37.174008 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wqxsz" event={"ID":"265ef484-8fd2-455b-a69a-6b195a32a690","Type":"ContainerDied","Data":"8b70396d6aefe1774bf1fcd7194a68c00c786bb9b961fa1fa99807fcafeaa7a9"} Feb 23 09:21:37 crc kubenswrapper[4834]: I0223 09:21:37.176690 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wqxsz" event={"ID":"265ef484-8fd2-455b-a69a-6b195a32a690","Type":"ContainerStarted","Data":"f0037fc154d6dc27b42f3e1661dace54099b4013da7ac4a692d968ffb8532d80"} Feb 23 09:21:38 crc kubenswrapper[4834]: I0223 09:21:38.183207 4834 generic.go:334] "Generic (PLEG): container finished" podID="265ef484-8fd2-455b-a69a-6b195a32a690" containerID="13ef7b55843d1b5582e0d767f69599103b3ee580ce8826eb6ce0dde04125d770" exitCode=0 Feb 23 09:21:38 crc kubenswrapper[4834]: I0223 09:21:38.183438 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wqxsz" event={"ID":"265ef484-8fd2-455b-a69a-6b195a32a690","Type":"ContainerDied","Data":"13ef7b55843d1b5582e0d767f69599103b3ee580ce8826eb6ce0dde04125d770"} Feb 23 09:21:39 crc kubenswrapper[4834]: I0223 09:21:39.191073 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wqxsz" event={"ID":"265ef484-8fd2-455b-a69a-6b195a32a690","Type":"ContainerStarted","Data":"e77c27c17b12bf89a37b1723a9ec3d8e28b334dc2180d9c55a87fbe5fbc3e289"} Feb 23 09:21:39 crc kubenswrapper[4834]: I0223 09:21:39.208098 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wqxsz" podStartSLOduration=2.827008624 
podStartE2EDuration="4.208080865s" podCreationTimestamp="2026-02-23 09:21:35 +0000 UTC" firstStartedPulling="2026-02-23 09:21:37.175585642 +0000 UTC m=+833.253900039" lastFinishedPulling="2026-02-23 09:21:38.556657893 +0000 UTC m=+834.634972280" observedRunningTime="2026-02-23 09:21:39.207451478 +0000 UTC m=+835.285765915" watchObservedRunningTime="2026-02-23 09:21:39.208080865 +0000 UTC m=+835.286395252" Feb 23 09:21:45 crc kubenswrapper[4834]: I0223 09:21:45.766019 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:45 crc kubenswrapper[4834]: I0223 09:21:45.767629 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:45 crc kubenswrapper[4834]: I0223 09:21:45.806085 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:46 crc kubenswrapper[4834]: I0223 09:21:46.266332 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:46 crc kubenswrapper[4834]: I0223 09:21:46.303365 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wqxsz"] Feb 23 09:21:48 crc kubenswrapper[4834]: I0223 09:21:48.244697 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wqxsz" podUID="265ef484-8fd2-455b-a69a-6b195a32a690" containerName="registry-server" containerID="cri-o://e77c27c17b12bf89a37b1723a9ec3d8e28b334dc2180d9c55a87fbe5fbc3e289" gracePeriod=2 Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.103971 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.126106 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/265ef484-8fd2-455b-a69a-6b195a32a690-utilities\") pod \"265ef484-8fd2-455b-a69a-6b195a32a690\" (UID: \"265ef484-8fd2-455b-a69a-6b195a32a690\") " Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.126179 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hfz26\" (UniqueName: \"kubernetes.io/projected/265ef484-8fd2-455b-a69a-6b195a32a690-kube-api-access-hfz26\") pod \"265ef484-8fd2-455b-a69a-6b195a32a690\" (UID: \"265ef484-8fd2-455b-a69a-6b195a32a690\") " Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.126281 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/265ef484-8fd2-455b-a69a-6b195a32a690-catalog-content\") pod \"265ef484-8fd2-455b-a69a-6b195a32a690\" (UID: \"265ef484-8fd2-455b-a69a-6b195a32a690\") " Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.129285 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/265ef484-8fd2-455b-a69a-6b195a32a690-utilities" (OuterVolumeSpecName: "utilities") pod "265ef484-8fd2-455b-a69a-6b195a32a690" (UID: "265ef484-8fd2-455b-a69a-6b195a32a690"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.139043 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/265ef484-8fd2-455b-a69a-6b195a32a690-kube-api-access-hfz26" (OuterVolumeSpecName: "kube-api-access-hfz26") pod "265ef484-8fd2-455b-a69a-6b195a32a690" (UID: "265ef484-8fd2-455b-a69a-6b195a32a690"). InnerVolumeSpecName "kube-api-access-hfz26". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.191888 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/265ef484-8fd2-455b-a69a-6b195a32a690-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "265ef484-8fd2-455b-a69a-6b195a32a690" (UID: "265ef484-8fd2-455b-a69a-6b195a32a690"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.228087 4834 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/265ef484-8fd2-455b-a69a-6b195a32a690-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.228119 4834 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/265ef484-8fd2-455b-a69a-6b195a32a690-utilities\") on node \"crc\" DevicePath \"\"" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.228129 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hfz26\" (UniqueName: \"kubernetes.io/projected/265ef484-8fd2-455b-a69a-6b195a32a690-kube-api-access-hfz26\") on node \"crc\" DevicePath \"\"" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.263720 4834 generic.go:334] "Generic (PLEG): container finished" podID="265ef484-8fd2-455b-a69a-6b195a32a690" containerID="e77c27c17b12bf89a37b1723a9ec3d8e28b334dc2180d9c55a87fbe5fbc3e289" exitCode=0 Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.263797 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wqxsz" event={"ID":"265ef484-8fd2-455b-a69a-6b195a32a690","Type":"ContainerDied","Data":"e77c27c17b12bf89a37b1723a9ec3d8e28b334dc2180d9c55a87fbe5fbc3e289"} Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.263949 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wqxsz" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.264939 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wqxsz" event={"ID":"265ef484-8fd2-455b-a69a-6b195a32a690","Type":"ContainerDied","Data":"f0037fc154d6dc27b42f3e1661dace54099b4013da7ac4a692d968ffb8532d80"} Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.265006 4834 scope.go:117] "RemoveContainer" containerID="e77c27c17b12bf89a37b1723a9ec3d8e28b334dc2180d9c55a87fbe5fbc3e289" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.295110 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wqxsz"] Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.299190 4834 scope.go:117] "RemoveContainer" containerID="13ef7b55843d1b5582e0d767f69599103b3ee580ce8826eb6ce0dde04125d770" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.302694 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wqxsz"] Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.339049 4834 scope.go:117] "RemoveContainer" containerID="8b70396d6aefe1774bf1fcd7194a68c00c786bb9b961fa1fa99807fcafeaa7a9" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.353966 4834 scope.go:117] "RemoveContainer" containerID="e77c27c17b12bf89a37b1723a9ec3d8e28b334dc2180d9c55a87fbe5fbc3e289" Feb 23 09:21:49 crc kubenswrapper[4834]: E0223 09:21:49.363232 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e77c27c17b12bf89a37b1723a9ec3d8e28b334dc2180d9c55a87fbe5fbc3e289\": container with ID starting with e77c27c17b12bf89a37b1723a9ec3d8e28b334dc2180d9c55a87fbe5fbc3e289 not found: ID does not exist" containerID="e77c27c17b12bf89a37b1723a9ec3d8e28b334dc2180d9c55a87fbe5fbc3e289" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.363479 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e77c27c17b12bf89a37b1723a9ec3d8e28b334dc2180d9c55a87fbe5fbc3e289"} err="failed to get container status \"e77c27c17b12bf89a37b1723a9ec3d8e28b334dc2180d9c55a87fbe5fbc3e289\": rpc error: code = NotFound desc = could not find container \"e77c27c17b12bf89a37b1723a9ec3d8e28b334dc2180d9c55a87fbe5fbc3e289\": container with ID starting with e77c27c17b12bf89a37b1723a9ec3d8e28b334dc2180d9c55a87fbe5fbc3e289 not found: ID does not exist" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.363519 4834 scope.go:117] "RemoveContainer" containerID="13ef7b55843d1b5582e0d767f69599103b3ee580ce8826eb6ce0dde04125d770" Feb 23 09:21:49 crc kubenswrapper[4834]: E0223 09:21:49.364070 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13ef7b55843d1b5582e0d767f69599103b3ee580ce8826eb6ce0dde04125d770\": container with ID starting with 13ef7b55843d1b5582e0d767f69599103b3ee580ce8826eb6ce0dde04125d770 not found: ID does not exist" containerID="13ef7b55843d1b5582e0d767f69599103b3ee580ce8826eb6ce0dde04125d770" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.364101 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13ef7b55843d1b5582e0d767f69599103b3ee580ce8826eb6ce0dde04125d770"} err="failed to get container status \"13ef7b55843d1b5582e0d767f69599103b3ee580ce8826eb6ce0dde04125d770\": rpc error: code = NotFound desc = could not find 
container \"13ef7b55843d1b5582e0d767f69599103b3ee580ce8826eb6ce0dde04125d770\": container with ID starting with 13ef7b55843d1b5582e0d767f69599103b3ee580ce8826eb6ce0dde04125d770 not found: ID does not exist" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.364123 4834 scope.go:117] "RemoveContainer" containerID="8b70396d6aefe1774bf1fcd7194a68c00c786bb9b961fa1fa99807fcafeaa7a9" Feb 23 09:21:49 crc kubenswrapper[4834]: E0223 09:21:49.364490 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b70396d6aefe1774bf1fcd7194a68c00c786bb9b961fa1fa99807fcafeaa7a9\": container with ID starting with 8b70396d6aefe1774bf1fcd7194a68c00c786bb9b961fa1fa99807fcafeaa7a9 not found: ID does not exist" containerID="8b70396d6aefe1774bf1fcd7194a68c00c786bb9b961fa1fa99807fcafeaa7a9" Feb 23 09:21:49 crc kubenswrapper[4834]: I0223 09:21:49.364547 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b70396d6aefe1774bf1fcd7194a68c00c786bb9b961fa1fa99807fcafeaa7a9"} err="failed to get container status \"8b70396d6aefe1774bf1fcd7194a68c00c786bb9b961fa1fa99807fcafeaa7a9\": rpc error: code = NotFound desc = could not find container \"8b70396d6aefe1774bf1fcd7194a68c00c786bb9b961fa1fa99807fcafeaa7a9\": container with ID starting with 8b70396d6aefe1774bf1fcd7194a68c00c786bb9b961fa1fa99807fcafeaa7a9 not found: ID does not exist" Feb 23 09:21:50 crc kubenswrapper[4834]: I0223 09:21:50.597028 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="265ef484-8fd2-455b-a69a-6b195a32a690" path="/var/lib/kubelet/pods/265ef484-8fd2-455b-a69a-6b195a32a690/volumes" Feb 23 09:21:57 crc kubenswrapper[4834]: I0223 09:21:57.810842 4834 patch_prober.go:28] interesting pod/machine-config-daemon-kt9lp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 23 09:21:57 crc kubenswrapper[4834]: I0223 09:21:57.811489 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 23 09:22:27 crc kubenswrapper[4834]: I0223 09:22:27.810320 4834 patch_prober.go:28] interesting pod/machine-config-daemon-kt9lp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 23 09:22:27 crc kubenswrapper[4834]: I0223 09:22:27.810932 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 23 09:22:38 crc kubenswrapper[4834]: I0223 09:22:38.567080 4834 generic.go:334] "Generic (PLEG): container finished" podID="36931913-6ce1-4442-9640-194bcf85a32c" containerID="55bb2427e85aa0de31fe2d5f45aba723f981f78f6756c29fbec8f903401fd241" exitCode=0 Feb 23 09:22:38 crc kubenswrapper[4834]: I0223 09:22:38.567184 4834 kubelet.go:2453] "SyncLoop (PLEG): event 
for pod" pod="openshift-must-gather-wl2ws/must-gather-p9n75" event={"ID":"36931913-6ce1-4442-9640-194bcf85a32c","Type":"ContainerDied","Data":"55bb2427e85aa0de31fe2d5f45aba723f981f78f6756c29fbec8f903401fd241"} Feb 23 09:22:38 crc kubenswrapper[4834]: I0223 09:22:38.568205 4834 scope.go:117] "RemoveContainer" containerID="55bb2427e85aa0de31fe2d5f45aba723f981f78f6756c29fbec8f903401fd241" Feb 23 09:22:38 crc kubenswrapper[4834]: I0223 09:22:38.847501 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-wl2ws_must-gather-p9n75_36931913-6ce1-4442-9640-194bcf85a32c/gather/0.log" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.621325 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-kc8m2"] Feb 23 09:22:41 crc kubenswrapper[4834]: E0223 09:22:41.621886 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="265ef484-8fd2-455b-a69a-6b195a32a690" containerName="extract-content" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.621897 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="265ef484-8fd2-455b-a69a-6b195a32a690" containerName="extract-content" Feb 23 09:22:41 crc kubenswrapper[4834]: E0223 09:22:41.621907 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="265ef484-8fd2-455b-a69a-6b195a32a690" containerName="extract-utilities" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.621913 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="265ef484-8fd2-455b-a69a-6b195a32a690" containerName="extract-utilities" Feb 23 09:22:41 crc kubenswrapper[4834]: E0223 09:22:41.621922 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="265ef484-8fd2-455b-a69a-6b195a32a690" containerName="registry-server" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.621928 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="265ef484-8fd2-455b-a69a-6b195a32a690" containerName="registry-server" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.622038 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="265ef484-8fd2-455b-a69a-6b195a32a690" containerName="registry-server" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.622735 4834 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.636049 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kc8m2"] Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.678809 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4579d53-a261-495a-93a6-96ea3db448b1-utilities\") pod \"redhat-operators-kc8m2\" (UID: \"a4579d53-a261-495a-93a6-96ea3db448b1\") " pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.678874 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94nbw\" (UniqueName: \"kubernetes.io/projected/a4579d53-a261-495a-93a6-96ea3db448b1-kube-api-access-94nbw\") pod \"redhat-operators-kc8m2\" (UID: \"a4579d53-a261-495a-93a6-96ea3db448b1\") " pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.678941 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4579d53-a261-495a-93a6-96ea3db448b1-catalog-content\") pod \"redhat-operators-kc8m2\" (UID: \"a4579d53-a261-495a-93a6-96ea3db448b1\") " pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.780206 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4579d53-a261-495a-93a6-96ea3db448b1-utilities\") pod \"redhat-operators-kc8m2\" (UID: \"a4579d53-a261-495a-93a6-96ea3db448b1\") " pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.780279 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94nbw\" (UniqueName: \"kubernetes.io/projected/a4579d53-a261-495a-93a6-96ea3db448b1-kube-api-access-94nbw\") pod \"redhat-operators-kc8m2\" (UID: \"a4579d53-a261-495a-93a6-96ea3db448b1\") " pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.780344 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4579d53-a261-495a-93a6-96ea3db448b1-catalog-content\") pod \"redhat-operators-kc8m2\" (UID: \"a4579d53-a261-495a-93a6-96ea3db448b1\") " pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.780854 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4579d53-a261-495a-93a6-96ea3db448b1-catalog-content\") pod \"redhat-operators-kc8m2\" (UID: \"a4579d53-a261-495a-93a6-96ea3db448b1\") " pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.781529 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4579d53-a261-495a-93a6-96ea3db448b1-utilities\") pod \"redhat-operators-kc8m2\" (UID: \"a4579d53-a261-495a-93a6-96ea3db448b1\") " pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.822495 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-94nbw\" (UniqueName: \"kubernetes.io/projected/a4579d53-a261-495a-93a6-96ea3db448b1-kube-api-access-94nbw\") pod \"redhat-operators-kc8m2\" (UID: \"a4579d53-a261-495a-93a6-96ea3db448b1\") " pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:41 crc kubenswrapper[4834]: I0223 09:22:41.944895 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:42 crc kubenswrapper[4834]: I0223 09:22:42.138598 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-kc8m2"] Feb 23 09:22:42 crc kubenswrapper[4834]: W0223 09:22:42.144986 4834 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda4579d53_a261_495a_93a6_96ea3db448b1.slice/crio-e9e7862af02f44b7c98d7cd3338131dab72f499ddfc025403bb79af9b804bea2 WatchSource:0}: Error finding container e9e7862af02f44b7c98d7cd3338131dab72f499ddfc025403bb79af9b804bea2: Status 404 returned error can't find the container with id e9e7862af02f44b7c98d7cd3338131dab72f499ddfc025403bb79af9b804bea2 Feb 23 09:22:42 crc kubenswrapper[4834]: I0223 09:22:42.591940 4834 generic.go:334] "Generic (PLEG): container finished" podID="a4579d53-a261-495a-93a6-96ea3db448b1" containerID="ca3d9dbc3da0b5c31bacb4d1ea0e381a661a0a441ff64d91ab6ea2b1a623783b" exitCode=0 Feb 23 09:22:42 crc kubenswrapper[4834]: I0223 09:22:42.593794 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kc8m2" event={"ID":"a4579d53-a261-495a-93a6-96ea3db448b1","Type":"ContainerDied","Data":"ca3d9dbc3da0b5c31bacb4d1ea0e381a661a0a441ff64d91ab6ea2b1a623783b"} Feb 23 09:22:42 crc kubenswrapper[4834]: I0223 09:22:42.593838 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kc8m2" event={"ID":"a4579d53-a261-495a-93a6-96ea3db448b1","Type":"ContainerStarted","Data":"e9e7862af02f44b7c98d7cd3338131dab72f499ddfc025403bb79af9b804bea2"} Feb 23 09:22:42 crc kubenswrapper[4834]: I0223 09:22:42.594765 4834 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Feb 23 09:22:43 crc kubenswrapper[4834]: I0223 09:22:43.601097 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kc8m2" event={"ID":"a4579d53-a261-495a-93a6-96ea3db448b1","Type":"ContainerStarted","Data":"b7f484b275940bf291d6f1a3c7e1069adef48360817bd0463243f588d4cd2f3f"} Feb 23 09:22:44 crc kubenswrapper[4834]: I0223 09:22:44.616208 4834 generic.go:334] "Generic (PLEG): container finished" podID="a4579d53-a261-495a-93a6-96ea3db448b1" containerID="b7f484b275940bf291d6f1a3c7e1069adef48360817bd0463243f588d4cd2f3f" exitCode=0 Feb 23 09:22:44 crc kubenswrapper[4834]: I0223 09:22:44.616259 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kc8m2" event={"ID":"a4579d53-a261-495a-93a6-96ea3db448b1","Type":"ContainerDied","Data":"b7f484b275940bf291d6f1a3c7e1069adef48360817bd0463243f588d4cd2f3f"} Feb 23 09:22:45 crc kubenswrapper[4834]: I0223 09:22:45.624366 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kc8m2" event={"ID":"a4579d53-a261-495a-93a6-96ea3db448b1","Type":"ContainerStarted","Data":"185f0a3d8a74ba1baea95fa04da5570398b513d4bc1931993d14e3501749d35d"} Feb 23 09:22:45 crc kubenswrapper[4834]: I0223 09:22:45.671585 4834 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="openshift-marketplace/redhat-operators-kc8m2" podStartSLOduration=2.276914816 podStartE2EDuration="4.671568451s" podCreationTimestamp="2026-02-23 09:22:41 +0000 UTC" firstStartedPulling="2026-02-23 09:22:42.594543744 +0000 UTC m=+898.672858131" lastFinishedPulling="2026-02-23 09:22:44.989197369 +0000 UTC m=+901.067511766" observedRunningTime="2026-02-23 09:22:45.642928607 +0000 UTC m=+901.721242994" watchObservedRunningTime="2026-02-23 09:22:45.671568451 +0000 UTC m=+901.749882838" Feb 23 09:22:45 crc kubenswrapper[4834]: I0223 09:22:45.672685 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-wl2ws/must-gather-p9n75"] Feb 23 09:22:45 crc kubenswrapper[4834]: I0223 09:22:45.673117 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-wl2ws/must-gather-p9n75" podUID="36931913-6ce1-4442-9640-194bcf85a32c" containerName="copy" containerID="cri-o://f07f53cddcea40aa352433e38b2f146d3bf02d5c2167d73c49d1b93414e7cc17" gracePeriod=2 Feb 23 09:22:45 crc kubenswrapper[4834]: I0223 09:22:45.676022 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-wl2ws/must-gather-p9n75"] Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.001031 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-wl2ws_must-gather-p9n75_36931913-6ce1-4442-9640-194bcf85a32c/copy/0.log" Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.003044 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-wl2ws/must-gather-p9n75" Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.025033 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zk5n8\" (UniqueName: \"kubernetes.io/projected/36931913-6ce1-4442-9640-194bcf85a32c-kube-api-access-zk5n8\") pod \"36931913-6ce1-4442-9640-194bcf85a32c\" (UID: \"36931913-6ce1-4442-9640-194bcf85a32c\") " Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.025126 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/36931913-6ce1-4442-9640-194bcf85a32c-must-gather-output\") pod \"36931913-6ce1-4442-9640-194bcf85a32c\" (UID: \"36931913-6ce1-4442-9640-194bcf85a32c\") " Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.031657 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36931913-6ce1-4442-9640-194bcf85a32c-kube-api-access-zk5n8" (OuterVolumeSpecName: "kube-api-access-zk5n8") pod "36931913-6ce1-4442-9640-194bcf85a32c" (UID: "36931913-6ce1-4442-9640-194bcf85a32c"). InnerVolumeSpecName "kube-api-access-zk5n8". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.073980 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36931913-6ce1-4442-9640-194bcf85a32c-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "36931913-6ce1-4442-9640-194bcf85a32c" (UID: "36931913-6ce1-4442-9640-194bcf85a32c"). InnerVolumeSpecName "must-gather-output". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.127650 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zk5n8\" (UniqueName: \"kubernetes.io/projected/36931913-6ce1-4442-9640-194bcf85a32c-kube-api-access-zk5n8\") on node \"crc\" DevicePath \"\"" Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.130475 4834 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/36931913-6ce1-4442-9640-194bcf85a32c-must-gather-output\") on node \"crc\" DevicePath \"\"" Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.596485 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36931913-6ce1-4442-9640-194bcf85a32c" path="/var/lib/kubelet/pods/36931913-6ce1-4442-9640-194bcf85a32c/volumes" Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.632290 4834 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-wl2ws_must-gather-p9n75_36931913-6ce1-4442-9640-194bcf85a32c/copy/0.log" Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.632841 4834 generic.go:334] "Generic (PLEG): container finished" podID="36931913-6ce1-4442-9640-194bcf85a32c" containerID="f07f53cddcea40aa352433e38b2f146d3bf02d5c2167d73c49d1b93414e7cc17" exitCode=143 Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.632924 4834 scope.go:117] "RemoveContainer" containerID="f07f53cddcea40aa352433e38b2f146d3bf02d5c2167d73c49d1b93414e7cc17" Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.632948 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-wl2ws/must-gather-p9n75" Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.654986 4834 scope.go:117] "RemoveContainer" containerID="55bb2427e85aa0de31fe2d5f45aba723f981f78f6756c29fbec8f903401fd241" Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.685869 4834 scope.go:117] "RemoveContainer" containerID="f07f53cddcea40aa352433e38b2f146d3bf02d5c2167d73c49d1b93414e7cc17" Feb 23 09:22:46 crc kubenswrapper[4834]: E0223 09:22:46.686886 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f07f53cddcea40aa352433e38b2f146d3bf02d5c2167d73c49d1b93414e7cc17\": container with ID starting with f07f53cddcea40aa352433e38b2f146d3bf02d5c2167d73c49d1b93414e7cc17 not found: ID does not exist" containerID="f07f53cddcea40aa352433e38b2f146d3bf02d5c2167d73c49d1b93414e7cc17" Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.686934 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f07f53cddcea40aa352433e38b2f146d3bf02d5c2167d73c49d1b93414e7cc17"} err="failed to get container status \"f07f53cddcea40aa352433e38b2f146d3bf02d5c2167d73c49d1b93414e7cc17\": rpc error: code = NotFound desc = could not find container \"f07f53cddcea40aa352433e38b2f146d3bf02d5c2167d73c49d1b93414e7cc17\": container with ID starting with f07f53cddcea40aa352433e38b2f146d3bf02d5c2167d73c49d1b93414e7cc17 not found: ID does not exist" Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.686966 4834 scope.go:117] "RemoveContainer" containerID="55bb2427e85aa0de31fe2d5f45aba723f981f78f6756c29fbec8f903401fd241" Feb 23 09:22:46 crc kubenswrapper[4834]: E0223 09:22:46.687290 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"55bb2427e85aa0de31fe2d5f45aba723f981f78f6756c29fbec8f903401fd241\": container with ID starting with 55bb2427e85aa0de31fe2d5f45aba723f981f78f6756c29fbec8f903401fd241 not found: ID does not exist" containerID="55bb2427e85aa0de31fe2d5f45aba723f981f78f6756c29fbec8f903401fd241" Feb 23 09:22:46 crc kubenswrapper[4834]: I0223 09:22:46.687318 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55bb2427e85aa0de31fe2d5f45aba723f981f78f6756c29fbec8f903401fd241"} err="failed to get container status \"55bb2427e85aa0de31fe2d5f45aba723f981f78f6756c29fbec8f903401fd241\": rpc error: code = NotFound desc = could not find container \"55bb2427e85aa0de31fe2d5f45aba723f981f78f6756c29fbec8f903401fd241\": container with ID starting with 55bb2427e85aa0de31fe2d5f45aba723f981f78f6756c29fbec8f903401fd241 not found: ID does not exist" Feb 23 09:22:51 crc kubenswrapper[4834]: I0223 09:22:51.945348 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:51 crc kubenswrapper[4834]: I0223 09:22:51.945857 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:51 crc kubenswrapper[4834]: I0223 09:22:51.981435 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:52 crc kubenswrapper[4834]: I0223 09:22:52.721737 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:52 crc kubenswrapper[4834]: I0223 09:22:52.766541 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kc8m2"] Feb 23 09:22:54 crc kubenswrapper[4834]: I0223 09:22:54.678060 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-kc8m2" podUID="a4579d53-a261-495a-93a6-96ea3db448b1" containerName="registry-server" containerID="cri-o://185f0a3d8a74ba1baea95fa04da5570398b513d4bc1931993d14e3501749d35d" gracePeriod=2 Feb 23 09:22:55 crc kubenswrapper[4834]: E0223 09:22:55.643920 4834 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda4579d53_a261_495a_93a6_96ea3db448b1.slice/crio-conmon-185f0a3d8a74ba1baea95fa04da5570398b513d4bc1931993d14e3501749d35d.scope\": RecentStats: unable to find data in memory cache]" Feb 23 09:22:55 crc kubenswrapper[4834]: I0223 09:22:55.684822 4834 generic.go:334] "Generic (PLEG): container finished" podID="a4579d53-a261-495a-93a6-96ea3db448b1" containerID="185f0a3d8a74ba1baea95fa04da5570398b513d4bc1931993d14e3501749d35d" exitCode=0 Feb 23 09:22:55 crc kubenswrapper[4834]: I0223 09:22:55.684876 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kc8m2" event={"ID":"a4579d53-a261-495a-93a6-96ea3db448b1","Type":"ContainerDied","Data":"185f0a3d8a74ba1baea95fa04da5570398b513d4bc1931993d14e3501749d35d"} Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.190646 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.283328 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4579d53-a261-495a-93a6-96ea3db448b1-catalog-content\") pod \"a4579d53-a261-495a-93a6-96ea3db448b1\" (UID: \"a4579d53-a261-495a-93a6-96ea3db448b1\") " Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.283429 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94nbw\" (UniqueName: \"kubernetes.io/projected/a4579d53-a261-495a-93a6-96ea3db448b1-kube-api-access-94nbw\") pod \"a4579d53-a261-495a-93a6-96ea3db448b1\" (UID: \"a4579d53-a261-495a-93a6-96ea3db448b1\") " Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.283480 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4579d53-a261-495a-93a6-96ea3db448b1-utilities\") pod \"a4579d53-a261-495a-93a6-96ea3db448b1\" (UID: \"a4579d53-a261-495a-93a6-96ea3db448b1\") " Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.284915 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4579d53-a261-495a-93a6-96ea3db448b1-utilities" (OuterVolumeSpecName: "utilities") pod "a4579d53-a261-495a-93a6-96ea3db448b1" (UID: "a4579d53-a261-495a-93a6-96ea3db448b1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.289863 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4579d53-a261-495a-93a6-96ea3db448b1-kube-api-access-94nbw" (OuterVolumeSpecName: "kube-api-access-94nbw") pod "a4579d53-a261-495a-93a6-96ea3db448b1" (UID: "a4579d53-a261-495a-93a6-96ea3db448b1"). InnerVolumeSpecName "kube-api-access-94nbw". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.385001 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94nbw\" (UniqueName: \"kubernetes.io/projected/a4579d53-a261-495a-93a6-96ea3db448b1-kube-api-access-94nbw\") on node \"crc\" DevicePath \"\"" Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.385038 4834 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a4579d53-a261-495a-93a6-96ea3db448b1-utilities\") on node \"crc\" DevicePath \"\"" Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.418455 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a4579d53-a261-495a-93a6-96ea3db448b1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a4579d53-a261-495a-93a6-96ea3db448b1" (UID: "a4579d53-a261-495a-93a6-96ea3db448b1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.486070 4834 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a4579d53-a261-495a-93a6-96ea3db448b1-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.694643 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-kc8m2" event={"ID":"a4579d53-a261-495a-93a6-96ea3db448b1","Type":"ContainerDied","Data":"e9e7862af02f44b7c98d7cd3338131dab72f499ddfc025403bb79af9b804bea2"} Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.696547 4834 scope.go:117] "RemoveContainer" containerID="185f0a3d8a74ba1baea95fa04da5570398b513d4bc1931993d14e3501749d35d" Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.694789 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-kc8m2" Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.720440 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-kc8m2"] Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.728920 4834 scope.go:117] "RemoveContainer" containerID="b7f484b275940bf291d6f1a3c7e1069adef48360817bd0463243f588d4cd2f3f" Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.729674 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-kc8m2"] Feb 23 09:22:56 crc kubenswrapper[4834]: I0223 09:22:56.747355 4834 scope.go:117] "RemoveContainer" containerID="ca3d9dbc3da0b5c31bacb4d1ea0e381a661a0a441ff64d91ab6ea2b1a623783b" Feb 23 09:22:57 crc kubenswrapper[4834]: I0223 09:22:57.811986 4834 patch_prober.go:28] interesting pod/machine-config-daemon-kt9lp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 23 09:22:57 crc kubenswrapper[4834]: I0223 09:22:57.812483 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 23 09:22:57 crc kubenswrapper[4834]: I0223 09:22:57.812546 4834 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" Feb 23 09:22:57 crc kubenswrapper[4834]: I0223 09:22:57.813233 4834 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d30a3a99709256ecf0353a89cecf613454777e4a41fa8a0ad21693babdbd0aa0"} pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Feb 23 09:22:57 crc kubenswrapper[4834]: I0223 09:22:57.813796 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" containerID="cri-o://d30a3a99709256ecf0353a89cecf613454777e4a41fa8a0ad21693babdbd0aa0" gracePeriod=600 Feb 23 09:22:58 crc kubenswrapper[4834]: I0223 
09:22:58.594533 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4579d53-a261-495a-93a6-96ea3db448b1" path="/var/lib/kubelet/pods/a4579d53-a261-495a-93a6-96ea3db448b1/volumes" Feb 23 09:22:58 crc kubenswrapper[4834]: I0223 09:22:58.714999 4834 generic.go:334] "Generic (PLEG): container finished" podID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerID="d30a3a99709256ecf0353a89cecf613454777e4a41fa8a0ad21693babdbd0aa0" exitCode=0 Feb 23 09:22:58 crc kubenswrapper[4834]: I0223 09:22:58.715065 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" event={"ID":"1172b9a5-71ca-49e9-a033-3b59c9c024a4","Type":"ContainerDied","Data":"d30a3a99709256ecf0353a89cecf613454777e4a41fa8a0ad21693babdbd0aa0"} Feb 23 09:22:58 crc kubenswrapper[4834]: I0223 09:22:58.715119 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" event={"ID":"1172b9a5-71ca-49e9-a033-3b59c9c024a4","Type":"ContainerStarted","Data":"e59c484d4bdfdd7272bae9bdbb91660f55de2a6a5b7a9aa7c5bb5ff4443f7a0b"} Feb 23 09:22:58 crc kubenswrapper[4834]: I0223 09:22:58.715150 4834 scope.go:117] "RemoveContainer" containerID="8b47de6c39b49990870479ce2e9821e6b671e9454136c5cd3f39336a30c7e515" Feb 23 09:23:02 crc kubenswrapper[4834]: I0223 09:23:02.991642 4834 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5xgwt"] Feb 23 09:23:02 crc kubenswrapper[4834]: E0223 09:23:02.993102 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4579d53-a261-495a-93a6-96ea3db448b1" containerName="extract-utilities" Feb 23 09:23:02 crc kubenswrapper[4834]: I0223 09:23:02.993174 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4579d53-a261-495a-93a6-96ea3db448b1" containerName="extract-utilities" Feb 23 09:23:02 crc kubenswrapper[4834]: E0223 09:23:02.993233 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36931913-6ce1-4442-9640-194bcf85a32c" containerName="gather" Feb 23 09:23:02 crc kubenswrapper[4834]: I0223 09:23:02.993287 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="36931913-6ce1-4442-9640-194bcf85a32c" containerName="gather" Feb 23 09:23:02 crc kubenswrapper[4834]: E0223 09:23:02.993452 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36931913-6ce1-4442-9640-194bcf85a32c" containerName="copy" Feb 23 09:23:02 crc kubenswrapper[4834]: I0223 09:23:02.993525 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="36931913-6ce1-4442-9640-194bcf85a32c" containerName="copy" Feb 23 09:23:02 crc kubenswrapper[4834]: E0223 09:23:02.993588 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4579d53-a261-495a-93a6-96ea3db448b1" containerName="registry-server" Feb 23 09:23:02 crc kubenswrapper[4834]: I0223 09:23:02.993642 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4579d53-a261-495a-93a6-96ea3db448b1" containerName="registry-server" Feb 23 09:23:02 crc kubenswrapper[4834]: E0223 09:23:02.993692 4834 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4579d53-a261-495a-93a6-96ea3db448b1" containerName="extract-content" Feb 23 09:23:02 crc kubenswrapper[4834]: I0223 09:23:02.993745 4834 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4579d53-a261-495a-93a6-96ea3db448b1" containerName="extract-content" Feb 23 09:23:02 crc kubenswrapper[4834]: I0223 09:23:02.993882 4834 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="a4579d53-a261-495a-93a6-96ea3db448b1" containerName="registry-server" Feb 23 09:23:02 crc kubenswrapper[4834]: I0223 09:23:02.993950 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="36931913-6ce1-4442-9640-194bcf85a32c" containerName="gather" Feb 23 09:23:02 crc kubenswrapper[4834]: I0223 09:23:02.994006 4834 memory_manager.go:354] "RemoveStaleState removing state" podUID="36931913-6ce1-4442-9640-194bcf85a32c" containerName="copy" Feb 23 09:23:02 crc kubenswrapper[4834]: I0223 09:23:02.994751 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.004707 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5xgwt"] Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.175150 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbc83155-15ab-49d9-9c11-6aefbe8bd946-catalog-content\") pod \"community-operators-5xgwt\" (UID: \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\") " pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.175197 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msjgp\" (UniqueName: \"kubernetes.io/projected/dbc83155-15ab-49d9-9c11-6aefbe8bd946-kube-api-access-msjgp\") pod \"community-operators-5xgwt\" (UID: \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\") " pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.175421 4834 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbc83155-15ab-49d9-9c11-6aefbe8bd946-utilities\") pod \"community-operators-5xgwt\" (UID: \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\") " pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.276833 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbc83155-15ab-49d9-9c11-6aefbe8bd946-catalog-content\") pod \"community-operators-5xgwt\" (UID: \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\") " pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.276888 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msjgp\" (UniqueName: \"kubernetes.io/projected/dbc83155-15ab-49d9-9c11-6aefbe8bd946-kube-api-access-msjgp\") pod \"community-operators-5xgwt\" (UID: \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\") " pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.276975 4834 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbc83155-15ab-49d9-9c11-6aefbe8bd946-utilities\") pod \"community-operators-5xgwt\" (UID: \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\") " pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.277390 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbc83155-15ab-49d9-9c11-6aefbe8bd946-catalog-content\") pod \"community-operators-5xgwt\" 
(UID: \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\") " pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.277460 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbc83155-15ab-49d9-9c11-6aefbe8bd946-utilities\") pod \"community-operators-5xgwt\" (UID: \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\") " pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.304717 4834 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msjgp\" (UniqueName: \"kubernetes.io/projected/dbc83155-15ab-49d9-9c11-6aefbe8bd946-kube-api-access-msjgp\") pod \"community-operators-5xgwt\" (UID: \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\") " pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.329990 4834 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.578715 4834 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5xgwt"] Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.759667 4834 generic.go:334] "Generic (PLEG): container finished" podID="dbc83155-15ab-49d9-9c11-6aefbe8bd946" containerID="d9f3e1b8130049ca76a429bb6e5d9cc7effc7590303980ad403c33917a3bcc06" exitCode=0 Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.759726 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5xgwt" event={"ID":"dbc83155-15ab-49d9-9c11-6aefbe8bd946","Type":"ContainerDied","Data":"d9f3e1b8130049ca76a429bb6e5d9cc7effc7590303980ad403c33917a3bcc06"} Feb 23 09:23:03 crc kubenswrapper[4834]: I0223 09:23:03.759760 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5xgwt" event={"ID":"dbc83155-15ab-49d9-9c11-6aefbe8bd946","Type":"ContainerStarted","Data":"5218a87f39998fac2948db826c8ef0bd775828da38d2c6887d8a599c03bbcf44"} Feb 23 09:23:05 crc kubenswrapper[4834]: I0223 09:23:05.775880 4834 generic.go:334] "Generic (PLEG): container finished" podID="dbc83155-15ab-49d9-9c11-6aefbe8bd946" containerID="2d1842891972a59a98dd6639cc11854e15b9fff1b3348d41d491428ccbc88e2f" exitCode=0 Feb 23 09:23:05 crc kubenswrapper[4834]: I0223 09:23:05.775982 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5xgwt" event={"ID":"dbc83155-15ab-49d9-9c11-6aefbe8bd946","Type":"ContainerDied","Data":"2d1842891972a59a98dd6639cc11854e15b9fff1b3348d41d491428ccbc88e2f"} Feb 23 09:23:06 crc kubenswrapper[4834]: I0223 09:23:06.783331 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5xgwt" event={"ID":"dbc83155-15ab-49d9-9c11-6aefbe8bd946","Type":"ContainerStarted","Data":"be9ec7d92070a91c26bd64e617032ae239da9a1d6c6d4c0da1c3b5da6617ac17"} Feb 23 09:23:06 crc kubenswrapper[4834]: I0223 09:23:06.806546 4834 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5xgwt" podStartSLOduration=2.096699525 podStartE2EDuration="4.806523s" podCreationTimestamp="2026-02-23 09:23:02 +0000 UTC" firstStartedPulling="2026-02-23 09:23:03.761203086 +0000 UTC m=+919.839517473" lastFinishedPulling="2026-02-23 09:23:06.471026521 +0000 UTC m=+922.549340948" observedRunningTime="2026-02-23 
09:23:06.800185266 +0000 UTC m=+922.878499683" watchObservedRunningTime="2026-02-23 09:23:06.806523 +0000 UTC m=+922.884837397" Feb 23 09:23:13 crc kubenswrapper[4834]: I0223 09:23:13.330463 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:13 crc kubenswrapper[4834]: I0223 09:23:13.331683 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:13 crc kubenswrapper[4834]: I0223 09:23:13.382208 4834 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:13 crc kubenswrapper[4834]: I0223 09:23:13.889248 4834 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:13 crc kubenswrapper[4834]: I0223 09:23:13.942813 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5xgwt"] Feb 23 09:23:15 crc kubenswrapper[4834]: I0223 09:23:15.837035 4834 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5xgwt" podUID="dbc83155-15ab-49d9-9c11-6aefbe8bd946" containerName="registry-server" containerID="cri-o://be9ec7d92070a91c26bd64e617032ae239da9a1d6c6d4c0da1c3b5da6617ac17" gracePeriod=2 Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.253474 4834 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.265068 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbc83155-15ab-49d9-9c11-6aefbe8bd946-catalog-content\") pod \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\" (UID: \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\") " Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.265379 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbc83155-15ab-49d9-9c11-6aefbe8bd946-utilities\") pod \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\" (UID: \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\") " Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.267694 4834 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msjgp\" (UniqueName: \"kubernetes.io/projected/dbc83155-15ab-49d9-9c11-6aefbe8bd946-kube-api-access-msjgp\") pod \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\" (UID: \"dbc83155-15ab-49d9-9c11-6aefbe8bd946\") " Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.266989 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbc83155-15ab-49d9-9c11-6aefbe8bd946-utilities" (OuterVolumeSpecName: "utilities") pod "dbc83155-15ab-49d9-9c11-6aefbe8bd946" (UID: "dbc83155-15ab-49d9-9c11-6aefbe8bd946"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.268348 4834 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dbc83155-15ab-49d9-9c11-6aefbe8bd946-utilities\") on node \"crc\" DevicePath \"\"" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.281194 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbc83155-15ab-49d9-9c11-6aefbe8bd946-kube-api-access-msjgp" (OuterVolumeSpecName: "kube-api-access-msjgp") pod "dbc83155-15ab-49d9-9c11-6aefbe8bd946" (UID: "dbc83155-15ab-49d9-9c11-6aefbe8bd946"). InnerVolumeSpecName "kube-api-access-msjgp". PluginName "kubernetes.io/projected", VolumeGidValue "" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.334624 4834 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbc83155-15ab-49d9-9c11-6aefbe8bd946-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dbc83155-15ab-49d9-9c11-6aefbe8bd946" (UID: "dbc83155-15ab-49d9-9c11-6aefbe8bd946"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.370047 4834 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dbc83155-15ab-49d9-9c11-6aefbe8bd946-catalog-content\") on node \"crc\" DevicePath \"\"" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.370099 4834 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msjgp\" (UniqueName: \"kubernetes.io/projected/dbc83155-15ab-49d9-9c11-6aefbe8bd946-kube-api-access-msjgp\") on node \"crc\" DevicePath \"\"" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.847087 4834 generic.go:334] "Generic (PLEG): container finished" podID="dbc83155-15ab-49d9-9c11-6aefbe8bd946" containerID="be9ec7d92070a91c26bd64e617032ae239da9a1d6c6d4c0da1c3b5da6617ac17" exitCode=0 Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.847140 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5xgwt" event={"ID":"dbc83155-15ab-49d9-9c11-6aefbe8bd946","Type":"ContainerDied","Data":"be9ec7d92070a91c26bd64e617032ae239da9a1d6c6d4c0da1c3b5da6617ac17"} Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.847177 4834 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5xgwt" event={"ID":"dbc83155-15ab-49d9-9c11-6aefbe8bd946","Type":"ContainerDied","Data":"5218a87f39998fac2948db826c8ef0bd775828da38d2c6887d8a599c03bbcf44"} Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.847209 4834 scope.go:117] "RemoveContainer" containerID="be9ec7d92070a91c26bd64e617032ae239da9a1d6c6d4c0da1c3b5da6617ac17" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.847207 4834 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5xgwt" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.870906 4834 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5xgwt"] Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.879345 4834 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5xgwt"] Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.884573 4834 scope.go:117] "RemoveContainer" containerID="2d1842891972a59a98dd6639cc11854e15b9fff1b3348d41d491428ccbc88e2f" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.902850 4834 scope.go:117] "RemoveContainer" containerID="d9f3e1b8130049ca76a429bb6e5d9cc7effc7590303980ad403c33917a3bcc06" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.919716 4834 scope.go:117] "RemoveContainer" containerID="be9ec7d92070a91c26bd64e617032ae239da9a1d6c6d4c0da1c3b5da6617ac17" Feb 23 09:23:16 crc kubenswrapper[4834]: E0223 09:23:16.920229 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be9ec7d92070a91c26bd64e617032ae239da9a1d6c6d4c0da1c3b5da6617ac17\": container with ID starting with be9ec7d92070a91c26bd64e617032ae239da9a1d6c6d4c0da1c3b5da6617ac17 not found: ID does not exist" containerID="be9ec7d92070a91c26bd64e617032ae239da9a1d6c6d4c0da1c3b5da6617ac17" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.920275 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be9ec7d92070a91c26bd64e617032ae239da9a1d6c6d4c0da1c3b5da6617ac17"} err="failed to get container status \"be9ec7d92070a91c26bd64e617032ae239da9a1d6c6d4c0da1c3b5da6617ac17\": rpc error: code = NotFound desc = could not find container \"be9ec7d92070a91c26bd64e617032ae239da9a1d6c6d4c0da1c3b5da6617ac17\": container with ID starting with be9ec7d92070a91c26bd64e617032ae239da9a1d6c6d4c0da1c3b5da6617ac17 not found: ID does not exist" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.920307 4834 scope.go:117] "RemoveContainer" containerID="2d1842891972a59a98dd6639cc11854e15b9fff1b3348d41d491428ccbc88e2f" Feb 23 09:23:16 crc kubenswrapper[4834]: E0223 09:23:16.920774 4834 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d1842891972a59a98dd6639cc11854e15b9fff1b3348d41d491428ccbc88e2f\": container with ID starting with 2d1842891972a59a98dd6639cc11854e15b9fff1b3348d41d491428ccbc88e2f not found: ID does not exist" containerID="2d1842891972a59a98dd6639cc11854e15b9fff1b3348d41d491428ccbc88e2f" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.920796 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d1842891972a59a98dd6639cc11854e15b9fff1b3348d41d491428ccbc88e2f"} err="failed to get container status \"2d1842891972a59a98dd6639cc11854e15b9fff1b3348d41d491428ccbc88e2f\": rpc error: code = NotFound desc = could not find container \"2d1842891972a59a98dd6639cc11854e15b9fff1b3348d41d491428ccbc88e2f\": container with ID starting with 2d1842891972a59a98dd6639cc11854e15b9fff1b3348d41d491428ccbc88e2f not found: ID does not exist" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.920809 4834 scope.go:117] "RemoveContainer" containerID="d9f3e1b8130049ca76a429bb6e5d9cc7effc7590303980ad403c33917a3bcc06" Feb 23 09:23:16 crc kubenswrapper[4834]: E0223 09:23:16.921082 4834 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"d9f3e1b8130049ca76a429bb6e5d9cc7effc7590303980ad403c33917a3bcc06\": container with ID starting with d9f3e1b8130049ca76a429bb6e5d9cc7effc7590303980ad403c33917a3bcc06 not found: ID does not exist" containerID="d9f3e1b8130049ca76a429bb6e5d9cc7effc7590303980ad403c33917a3bcc06" Feb 23 09:23:16 crc kubenswrapper[4834]: I0223 09:23:16.921110 4834 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d9f3e1b8130049ca76a429bb6e5d9cc7effc7590303980ad403c33917a3bcc06"} err="failed to get container status \"d9f3e1b8130049ca76a429bb6e5d9cc7effc7590303980ad403c33917a3bcc06\": rpc error: code = NotFound desc = could not find container \"d9f3e1b8130049ca76a429bb6e5d9cc7effc7590303980ad403c33917a3bcc06\": container with ID starting with d9f3e1b8130049ca76a429bb6e5d9cc7effc7590303980ad403c33917a3bcc06 not found: ID does not exist" Feb 23 09:23:18 crc kubenswrapper[4834]: I0223 09:23:18.596573 4834 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dbc83155-15ab-49d9-9c11-6aefbe8bd946" path="/var/lib/kubelet/pods/dbc83155-15ab-49d9-9c11-6aefbe8bd946/volumes" Feb 23 09:25:27 crc kubenswrapper[4834]: I0223 09:25:27.810780 4834 patch_prober.go:28] interesting pod/machine-config-daemon-kt9lp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 23 09:25:27 crc kubenswrapper[4834]: I0223 09:25:27.813499 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Feb 23 09:25:57 crc kubenswrapper[4834]: I0223 09:25:57.811137 4834 patch_prober.go:28] interesting pod/machine-config-daemon-kt9lp container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Feb 23 09:25:57 crc kubenswrapper[4834]: I0223 09:25:57.812600 4834 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-kt9lp" podUID="1172b9a5-71ca-49e9-a033-3b59c9c024a4" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515147016503024447 0ustar coreroot  Om77'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015147016503017364 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015147014026016505 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015147014026015455 5ustar corecore